//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
15 #include "llvm/CodeGen/CodeGenPrepare.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/PointerIntPair.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
37 #include "llvm/CodeGen/ISDOpcodes.h"
38 #include "llvm/CodeGen/SelectionDAGNodes.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/TargetPassConfig.h"
41 #include "llvm/CodeGen/TargetSubtargetInfo.h"
42 #include "llvm/CodeGen/ValueTypes.h"
43 #include "llvm/CodeGenTypes/MachineValueType.h"
44 #include "llvm/Config/llvm-config.h"
45 #include "llvm/IR/Argument.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfo.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Dominators.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GetElementPtrTypeIterator.h"
56 #include "llvm/IR/GlobalValue.h"
57 #include "llvm/IR/GlobalVariable.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InlineAsm.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/IntrinsicsAArch64.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/MDBuilder.h"
68 #include "llvm/IR/Module.h"
69 #include "llvm/IR/Operator.h"
70 #include "llvm/IR/PatternMatch.h"
71 #include "llvm/IR/ProfDataUtils.h"
72 #include "llvm/IR/Statepoint.h"
73 #include "llvm/IR/Type.h"
74 #include "llvm/IR/Use.h"
75 #include "llvm/IR/User.h"
76 #include "llvm/IR/Value.h"
77 #include "llvm/IR/ValueHandle.h"
78 #include "llvm/IR/ValueMap.h"
79 #include "llvm/InitializePasses.h"
80 #include "llvm/Pass.h"
81 #include "llvm/Support/BlockFrequency.h"
82 #include "llvm/Support/BranchProbability.h"
83 #include "llvm/Support/Casting.h"
84 #include "llvm/Support/CommandLine.h"
85 #include "llvm/Support/Compiler.h"
86 #include "llvm/Support/Debug.h"
87 #include "llvm/Support/ErrorHandling.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/Target/TargetMachine.h"
90 #include "llvm/Target/TargetOptions.h"
91 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
92 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
93 #include "llvm/Transforms/Utils/Local.h"
94 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
95 #include "llvm/Transforms/Utils/SizeOpts.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
                          cl::init(false),
                          cl::desc("Disable select to branch conversion."));

static cl::opt<bool>
    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
                      cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool>
    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
                        cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
             "profile, we cannot tell the function is cold for sure because "
             "it may be a function newly added without ever being sampled. "
             "With the flag enabled, compiler can put such profile unknown "
             "functions into a special section, so runtime system can choose "
             "to handle it in a different way than .text section, to save "
             "RAM for example. "));
static cl::opt<bool> BBSectionsGuidedSectionPrefix(
    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use the basic-block-sections profile to determine the text "
             "section prefix for hot functions. Functions with "
             "basic-block-sections profile will be placed in `.text.hot` "
             "regardless of their FDO profile info. Other functions won't be "
             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
             "profiles."));

static cl::opt<uint64_t> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool>
    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
                     cl::desc("Enable converting phi types in CodeGenPrepare"));

static cl::opt<unsigned>
    HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
                            cl::desc("Least BB number of huge function."));

static cl::opt<unsigned>
    MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
                          cl::Hidden,
                          cl::desc("Max number of address users to look at"));

static cl::opt<bool>
    DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
                      cl::desc("Disable elimination of dead PHI nodes."));
namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

enum ModifyDT {
  NotModifyDT, // Does not modify any DT.
  ModifyBBDT,  // Modify the Basic Block Dominator Tree.
  ModifyInstDT // Modify the Instruction Dominator in a Basic Block.
               // This usually means we move/delete/insert an instruction
               // in a Basic Block, so we should re-iterate the instructions
               // in such a Basic Block.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = MapVector<Value *, SExts>;

class TypePromotionTransaction;
class CodeGenPrepare {
  friend class CodeGenPrepareLegacyPass;
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  LoopInfo *LI = nullptr;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI = nullptr;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the type of the related instruction before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<AssertingVH<Value>,
            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP base after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  CodeGenPrepare(const TargetMachine *TM) : TM(TM) {}

  /// If we encounter a huge function, we need to limit the build time.
  bool IsHugeFunc = false;

  /// FreshBBs is like a worklist; it collects the updated BBs which need
  /// to be optimized again.
  /// Note: To limit build time in this pass, when a BB is updated we insert
  /// such a BB into FreshBBs for huge functions.
  SmallSet<BasicBlock *, 32> FreshBBs;

  void releaseMemory() {
    // Clear per function information.
    InsertedInsts.clear();
    PromotedInsts.clear();
  }

  bool run(Function &F, FunctionAnalysisManager &AM);

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
                          unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchType(SwitchInst *SI);
  bool optimizeSwitchPhiConstants(SwitchInst *SI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool fixupDbgVariableRecord(DbgVariableRecord &I);
  bool fixupDbgVariableRecordsOnInst(Instruction &I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);
  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
                                   CmpInst *Cmp, Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool optimizeURem(Instruction *Rem);
  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  void verifyBFIUpdates(Function &F);
  bool _run(Function &F);
};
class CodeGenPrepareLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepareLegacyPass() : FunctionPass(ID) {
    initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  }
};

} // end anonymous namespace
char CodeGenPrepareLegacyPass::ID = 0;
bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
  auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  CodeGenPrepare CGP(TM);
  CGP.DL = &F.getDataLayout();
  CGP.SubtargetInfo = TM->getSubtargetImpl(F);
  CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
  CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
  CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
  CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
  CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto BBSPRWP =
      getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;

  return CGP._run(F);
}
INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
  return new CodeGenPrepareLegacyPass();
}
PreservedAnalyses CodeGenPreparePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  CodeGenPrepare CGP(TM);
  bool Changed = CGP.run(F, AM);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<TargetLibraryAnalysis>();
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}
bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
  DL = &F.getDataLayout();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
  TTI = &AM.getResult<TargetIRAnalysis>(F);
  LI = &AM.getResult<LoopAnalysis>(F);
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  BBSectionsProfileReader =
      AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
  return _run(F);
}
bool CodeGenPrepare::_run(Function &F) {
  bool EverMadeChange = false;

  OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to
  // .text.hot, if requested.
  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
      BBSectionsProfileReader->isFunctionHot(F.getName())) {
    F.setSectionPrefix("hot");
  } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overwrites profile count based hotness while profile
    // counts based hotness overwrite the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we will place the function
    // into the unlikely section if (1) PSI shows this is a cold function, or
    // (2) the function has a cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |=
      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);

  // If we are optimizing a huge function, we need to consider the build time.
  // The basic algorithm's complexity is near O(N!).
  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;

  // Transformations above may invalidate dominator tree and/or loop info.
  DT.reset();
  LI->releaseMemory();
  LI->analyze(getDT(F));

  bool MadeChange = true;
  bool FuncIterated = false;
  while (MadeChange) {
    MadeChange = false;

    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      if (FuncIterated && !FreshBBs.contains(&BB))
        continue;

      ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
      bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);

      if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
        DT.reset();

      MadeChange |= Changed;
      if (IsHugeFunc) {
        // If the BB is updated, it may still have a chance to be optimized.
        // This usually happens at sink optimization.
        // For example:
        //
        //     %and = and i32 %a, 4
        //     %cmp = icmp eq i32 %and, 0
        //
        // If the %cmp sinks to another BB, the %and will have a chance to
        // sink.
        if (Changed)
          FreshBBs.insert(&BB);
        else if (FuncIterated)
          FreshBBs.erase(&BB);
      } else {
        // For small/normal functions, we restart BB iteration if the dominator
        // tree of the Function was changed.
        if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
          break;
      }
    }
    // We have iterated over all the BBs in the function (this only matters
    // for huge functions).
    FuncIterated = IsHugeFunc;

    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F, DT.get());

    if (MadeChange && VerifyLoopInfo)
      LI->verify(getDT(F));

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);

  return EverMadeChange;
}
bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}
/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });

  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}
// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}
/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
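///
/// Illustrative sketch (not taken from the original source):
///
///   bb1:               ; ends in an unconditional branch
///     ...
///     br label %bb2
///   bb2:               ; single predecessor is bb1
///     ...
///
/// Here bb2 is folded into bb1 and the unconditional branch disappears.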
bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;

    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    // Make an effort to skip unreachable blocks.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
                                /* MemDep */ nullptr,
                                /* PredecessorWithTwoSuccessors */ false, DT);
      Preds.insert(SinglePred);

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(BB);
      }
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (const auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}
/// Find a destination block from BB if BB is mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}
/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
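///
/// A typical candidate looks like this (illustrative sketch, not from the
/// original source):
///
///   split:                               ; preds = %entry
///     %p = phi i32 [ %x, %entry ]
///     br label %merge
///
/// Such a block does no real work, so folding it into its destination usually
/// gives instruction selection a better CFG to work with.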
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F)) {
    // Delete phi nodes that could block deleting other empty blocks.
    if (!DisableDeletePHIs)
      MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
    Blocks.push_back(&Block);
  }

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (isa<CallBrInst>(Pred->getTerminator()) &&
        llvm::is_contained(successors(Pred), DestBB))
      return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
  // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging : Cost(skipping merging) < Cost(merging BB), where the
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such
  // case, Cost(merging BB) will be the sum of their frequencies.
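  //
  // Worked example (illustrative only, not from the original source): with
  // Freq(Pred) = 80 and Freq(BB) = 30, Cost(skipping merging) = 30 * 2 = 60
  // copy-equivalents while Cost(merging BB) = 80 * 1 = 80, and indeed
  // Freq(Pred) / Freq(BB) = 2.67 > 2, so keeping the empty block is the
  // cheaper choice.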

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all BB's incoming values are same as the value from Pred. In this
  // case, no reason to skip merging because COPYs are expected to be placed in
  // Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
  return !Limit || PredFreq <= *Limit;
}
/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there is a more complex condition (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}
/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void replaceAllUsesWith(Value *Old, Value *New,
                               SmallSet<BasicBlock *, 32> &FreshBBs,
                               bool IsHuge) {
  auto *OldI = dyn_cast<Instruction>(Old);
  if (OldI != nullptr) {
    for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (IsHuge)
        FreshBBs.insert(User->getParent());
    }
  }
  Old->replaceAllUsesWith(New);
}
/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(DestBB);
      }
    }
    return;
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // Preserve loop Metadata.
  if (BI->hasMetadata(LLVMContext::MD_loop)) {
    for (auto *Pred : predecessors(BB))
      Pred->getTerminator()->copyMetadata(*BI, LLVMContext::MD_loop);
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}
// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}
// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure the relocation of derived pointer is defined after
  // relocation of base pointer. If we find a relocation corresponding to base
  // defined earlier than relocation of base then we move relocation of base
  // right before found relocation. We consider only relocation in the same
  // basic block as relocation of base. Relocations from other basic block will
  // be skipped by optimization and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case transform is only valid when base dominates derived
      // relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // it could be cases like this:
    // bb1:
    //  %g1 = call coldcc i8 addrspace(1)*
    //        @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // bb2:
    //  %g2 = call coldcc i8 addrspace(1)*
    //        @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 into i32 addrspace(1)*
    //
    // In this case, we cannot find the bitcast any more. So we insert a new
    // bitcast regardless of whether one already exists. In this way, we can
    // handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement =
        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
                          ArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the original
    // derived pointer's type, cast the new derived pointer to match it. Same
    // reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}
// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}
/// Sink the specified cast instruction into its user blocks.
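///
/// Illustrative sketch (not from the original source): a cast whose only use
/// lives in another block
///
///   def:
///     %c = trunc i64 %v to i32
///     br label %use
///   use:
///     %r = add i32 %c, 1
///
/// gets a clone of the cast inserted into %use so the cast and its user can be
/// selected together; the original cast is erased once it has no uses left.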
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock *, CastInst *> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = cast<CastInst>(CI->clone());
      InsertedCast->insertBefore(*UserBB, InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
    MadeChange = true;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
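///
/// Target-dependent sketch (an assumption for illustration, not from the
/// original source): on a target where i32 is promoted to the same 64-bit
/// register class as i64, a `trunc i64 %x to i32` may lower to nothing at all,
/// so sinking it into its user blocks costs no extra instructions.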
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
                                 ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT))
    return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}
// Match a simple increment by constant operation. Note that if a sub is
// matched, the step is negated (as if the step had been canonicalized to
// an add, even though we leave the instruction alone.)
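//
// Illustrative forms this is meant to match (sketch, not from the original
// source):
//
//   %iv.next = add i32 %iv, 1                                    ; Step = 1
//   %iv.next = sub i32 %iv, 4                                    ; Step = -4
//   %p = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %iv, i32 1)
//   %iv.next = extractvalue { i32, i1 } %p, 0                    ; Step = 1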
static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
                           Constant *&Step) {
  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step)))))
    return true;
  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step))))) {
    Step = ConstantExpr::getNeg(Step);
    return true;
  }
  return false;
}
/// If the given \p PN is an inductive variable with value IVInc coming from
/// the backedge, and on each iteration it gets increased by Step, return the
/// pair <IVInc, Step>. Otherwise, return std::nullopt.
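///
/// Illustrative sketch (not from the original source):
///
///   header:
///     %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
///     ...
///   latch:
///     %iv.next = add i32 %iv, 1
///     br i1 %cond, label %header, label %exit
///
/// For %iv this would return the pair <%iv.next, 1>.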
static std::optional<std::pair<Instruction *, Constant *>>
getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
  const Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
    return std::nullopt;
  auto *IVInc =
      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
    return std::nullopt;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
    return std::make_pair(IVInc, Step);
  return std::nullopt;
}
static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (!matchIncrement(I, LHS, Step))
    return false;
  if (auto *PN = dyn_cast<PHINode>(LHS))
    if (auto IVInc = getIVIncrement(PN, LI))
      return IVInc->first == I;
  return false;
}
*BO
,
1557 Value
*Arg0
, Value
*Arg1
,
1559 Intrinsic::ID IID
) {
1560 auto IsReplacableIVIncrement
= [this, &Cmp
](BinaryOperator
*BO
) {
1561 if (!isIVIncrement(BO
, LI
))
1563 const Loop
*L
= LI
->getLoopFor(BO
->getParent());
1564 assert(L
&& "L should not be null after isIVIncrement()");
1565 // Do not risk on moving increment into a child loop.
1566 if (LI
->getLoopFor(Cmp
->getParent()) != L
)
1569 // Finally, we need to ensure that the insert point will dominate all
1570 // existing uses of the increment.
1572 auto &DT
= getDT(*BO
->getParent()->getParent());
1573 if (DT
.dominates(Cmp
->getParent(), BO
->getParent()))
1574 // If we're moving up the dom tree, all uses are trivially dominated.
1575 // (This is the common case for code produced by LSR.)
1578 // Otherwise, special case the single use in the phi recurrence.
1579 return BO
->hasOneUse() && DT
.dominates(Cmp
->getParent(), L
->getLoopLatch());
1581 if (BO
->getParent() != Cmp
->getParent() && !IsReplacableIVIncrement(BO
)) {
1582 // We used to use a dominator tree here to allow multi-block optimization.
1583 // But that was problematic because:
1584 // 1. It could cause a perf regression by hoisting the math op into the
1586 // 2. It could cause a perf regression by creating a value that was live
1587 // across multiple blocks and increasing register pressure.
1588 // 3. Use of a dominator tree could cause large compile-time regression.
1589 // This is because we recompute the DT on every change in the main CGP
1590 // run-loop. The recomputing is probably unnecessary in many cases, so if
1591 // that was fixed, using a DT here would be ok.
1593 // There is one important particular case we still want to handle: if BO is
1594 // the IV increment. Important properties that make it profitable:
1595 // - We can speculate IV increment anywhere in the loop (as long as the
1596 // indvar Phi is its only user);
1597 // - Upon computing Cmp, we effectively compute something equivalent to the
1598 // IV increment (despite it loops differently in the IR). So moving it up
1599 // to the cmp point does not really increase register pressure.
1603 // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1604 if (BO
->getOpcode() == Instruction::Add
&&
1605 IID
== Intrinsic::usub_with_overflow
) {
1606 assert(isa
<Constant
>(Arg1
) && "Unexpected input for usubo");
1607 Arg1
= ConstantExpr::getNeg(cast
<Constant
>(Arg1
));
1610 // Insert at the first instruction of the pair.
1611 Instruction
*InsertPt
= nullptr;
1612 for (Instruction
&Iter
: *Cmp
->getParent()) {
1613 // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1614 // the overflow intrinsic are defined.
1615 if ((BO
->getOpcode() != Instruction::Xor
&& &Iter
== BO
) || &Iter
== Cmp
) {
1620 assert(InsertPt
!= nullptr && "Parent block did not contain cmp or binop");
1622 IRBuilder
<> Builder(InsertPt
);
1623 Value
*MathOV
= Builder
.CreateBinaryIntrinsic(IID
, Arg0
, Arg1
);
1624 if (BO
->getOpcode() != Instruction::Xor
) {
1625 Value
*Math
= Builder
.CreateExtractValue(MathOV
, 0, "math");
1626 replaceAllUsesWith(BO
, Math
, FreshBBs
, IsHugeFunc
);
1628 assert(BO
->hasOneUse() &&
1629 "Patterns with XOr should use the BO only in the compare");
1630 Value
*OV
= Builder
.CreateExtractValue(MathOV
, 1, "ov");
1631 replaceAllUsesWith(Cmp
, OV
, FreshBBs
, IsHugeFunc
);
1632 Cmp
->eraseFromParent();
1633 BO
->eraseFromParent();
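// Illustrative example (added commentary; value names are made up): when this
// helper is driven from combineToUAddWithOverflow, it rewrites
//   %math = add i32 %a, %b
//   %ov   = icmp ult i32 %math, %a
// into
//   %mathov = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math   = extractvalue { i32, i1 } %mathov, 0
//   %ov     = extractvalue { i32, i1 } %mathov, 1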
/// Match special-case patterns that check for unsigned add overflow.
static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
                                                   BinaryOperator *&Add) {
  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);

  // We are not expecting non-canonical/degenerate code. Just bail out.
  if (isa<Constant>(A))
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
    B = ConstantInt::get(B->getType(), 1);
  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
    B = Constant::getAllOnesValue(B->getType());
  else
    return false;

  // Check the users of the variable operand of the compare looking for an add
  // with the adjusted constant.
  for (User *U : A->users()) {
    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
      Add = cast<BinaryOperator>(U);
      return true;
    }
  }
  return false;
}
/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  bool EdgeCase = false;
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
      return false;
    // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
    A = Add->getOperand(0);
    B = Add->getOperand(1);
    EdgeCase = true;
  }

  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
                                 TLI->getValueType(*DL, Add->getType()),
                                 Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp.
  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
    return false;

  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
                                   Intrinsic::uadd_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  // We are not expecting non-canonical/degenerate code. Just bail out.
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
  if (isa<Constant>(A) && isa<Constant>(B))
    return false;

  // Convert (A u> B) to (B u< A) to simplify pattern matching.
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_UGT) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A == 0) is the same as (A u< 1).
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
    B = ConstantInt::get(B->getType(), 1);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A != 0) is the same as (0 u< A).
  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  if (Pred != ICmpInst::ICMP_ULT)
    return false;

  // Walk the users of a variable operand of a compare looking for a subtract or
  // add with that same operand. Also match the 2nd operand of the compare to
  // the add/sub, but that may be a negated constant operand of an add.
  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
  BinaryOperator *Sub = nullptr;
  for (User *U : CmpVariableOperand->users()) {
    // A - B, A u< B --> usubo(A, B)
    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
      Sub = cast<BinaryOperator>(U);
      break;
    }

    // A + (-C), A u< C (canonicalized form of (sub A, C))
    const APInt *CmpC, *AddC;
    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
      Sub = cast<BinaryOperator>(U);
      break;
    }
  }
  if (!Sub)
    return false;

  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
                                 TLI->getValueType(*DL, Sub->getType()),
                                 Sub->hasNUsesOrMore(1)))
    return false;

  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
                                   Cmp, Intrinsic::usub_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
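// Illustrative example (added commentary; value names are made up): a typical
// match is
//   %sub = sub i32 %a, %b
//   %cmp = icmp ult i32 %a, %b   ; the borrow/overflow check
// which is rewritten into a single @llvm.usub.with.overflow.i32 call whose two
// extractvalue results replace %sub and %cmp.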
/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                                    Cmp->getOperand(0), Cmp->getOperand(1), "");
      InsertedCmp->insertBefore(*UserBB, InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
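// Illustrative example (added commentary; names are made up): if
//   %c = icmp eq i32 %a, %b
// is defined in %bb0 but only branched on in %bb1 and %bb2, a copy of the
// compare is materialized in each user block and the original (now dead)
// compare is erased, so %c no longer needs a virtual register that is live
// across blocks.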
/// For pattern like:
///
///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
///   ...
/// DomBB:
///   ...
///   br DomCond, TrueBB, CmpBB
/// CmpBB: (with DomBB being the single predecessor)
///   ...
///   Cmp = icmp eq CmpOp0, CmpOp1
///   ...
///
/// On targets where the lowering of icmp sgt/slt differs from the lowering of
/// icmp eq (PowerPC), this would need two comparisons. This function tries to
/// convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0,
/// CmpOp1' so that DomCond and Cmp can share the same comparison and one
/// comparison is saved.
///
/// Return true if any changes are made.
static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
                                       const TargetLowering &TLI) {
  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  // If icmp eq has users other than BranchInst and SelectInst, converting it to
  // icmp slt/sgt would introduce more redundant LLVM IR.
  for (User *U : Cmp->users()) {
    if (isa<BranchInst>(U))
      continue;
    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
      continue;
    return false;
  }

  // This is a cheap/incomplete check for dominance - just match a single
  // predecessor with a conditional branch.
  BasicBlock *CmpBB = Cmp->getParent();
  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
  if (!DomBB)
    return false;

  // We want to ensure that the only way control gets to the comparison of
  // interest is that a less/greater than comparison on the same operands is
  // false.
  Value *DomCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
    return false;
  if (CmpBB != FalseBB)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
  ICmpInst::Predicate DomPred;
  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
    return false;
  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
    return false;

  // Convert the equality comparison to the opposite of the dominating
  // comparison and swap the direction for all branch/select users.
  // We have conceptually converted:
  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
  // into
  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
  // And similarly for branches.
  for (User *U : Cmp->users()) {
    if (auto *BI = dyn_cast<BranchInst>(U)) {
      assert(BI->isConditional() && "Must be conditional");
      BI->swapSuccessors();
      continue;
    }
    if (auto *SI = dyn_cast<SelectInst>(U)) {
      // Swap operands.
      SI->swapValues();
      SI->swapProfMetadata();
      continue;
    }
    llvm_unreachable("Must be a branch or a select");
  }
  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
  return true;
}
/// Many architectures use the same instruction for both subtract and cmp. Try
/// to swap cmp operands to match subtract operations to allow for CSE.
static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
  Value *Op0 = Cmp->getOperand(0);
  Value *Op1 = Cmp->getOperand(1);
  if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
      isa<Constant>(Op1) || Op0 == Op1)
    return false;

  // If a subtract already has the same operands as a compare, swapping would be
  // bad. If a subtract has the same operands as a compare but in reverse order,
  // then swapping is good.
  int GoodToSwap = 0;
  unsigned NumInspected = 0;
  for (const User *U : Op0->users()) {
    // Avoid walking many users.
    if (++NumInspected > 128)
      return false;
    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
      GoodToSwap++;
    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
      GoodToSwap--;
  }

  if (GoodToSwap > 0) {
    Cmp->swapOperands();
    return true;
  }
  return false;
}
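// Illustrative example (added commentary; value names are made up): given
//   %d = sub i32 %b, %a
//   %c = icmp ugt i32 %a, %b
// swapping the compare operands (and predicate) so the compare also uses
// (%b, %a) lets targets whose subtract sets flags CSE the compare with the
// existing sub.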
static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
                                  const DataLayout &DL) {
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
  if (!FCmp)
    return false;

  // Don't fold if the target offers free fabs and the predicate is legal.
  EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
  if (TLI.isFAbsFree(VT) &&
      TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
                          VT.getSimpleVT()))
    return false;

  // Reverse the canonicalization if it is a FP class test.
  auto ShouldReverseTransform = [](FPClassTest ClassTest) {
    return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
  };
  auto [ClassVal, ClassTest] =
      fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
                      FCmp->getOperand(0), FCmp->getOperand(1));
  if (!ClassVal)
    return false;

  if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
    return false;

  IRBuilder<> Builder(Cmp);
  Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
  Cmp->replaceAllUsesWith(IsFPClass);
  RecursivelyDeleteTriviallyDeadInstructions(Cmp);
  return true;
}
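// Illustrative example (added commentary; the concrete mask value is an
// assumption): on a target where fabs is not free, a compare such as
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %cmp  = fcmp oeq double %fabs, 0x7FF0000000000000   ; |x| == +inf
// corresponds to the class test fcInf and can be rewritten back into
//   %cmp = call i1 @llvm.is.fpclass.f64(double %x, i32 516)   ; 516 == fcInf
// undoing the usual canonicalization toward fcmp.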
static bool isRemOfLoopIncrementWithLoopInvariant(
    Instruction *Rem, const LoopInfo *LI, Value *&RemAmtOut, Value *&AddInstOut,
    Value *&AddOffsetOut, PHINode *&LoopIncrPNOut) {
  Value *Incr, *RemAmt;
  // NB: If RemAmt is a power of 2 it *should* have been transformed by now.
  if (!match(Rem, m_URem(m_Value(Incr), m_Value(RemAmt))))
    return false;

  Value *AddInst, *AddOffset;
  // Find out loop increment PHI.
  auto *PN = dyn_cast<PHINode>(Incr);
  if (PN != nullptr) {
    AddInst = nullptr;
    AddOffset = nullptr;
  } else {
    // Search through a NUW add on top of the loop increment.
    Value *V0, *V1;
    if (!match(Incr, m_NUWAdd(m_Value(V0), m_Value(V1))))
      return false;

    AddInst = Incr;
    PN = dyn_cast<PHINode>(V0);
    if (PN != nullptr) {
      AddOffset = V1;
    } else {
      PN = dyn_cast<PHINode>(V1);
      AddOffset = V0;
    }
  }

  if (PN == nullptr)
    return false;

  // This isn't strictly necessary, what we really need is one increment and any
  // amount of initial values all being the same.
  if (PN->getNumIncomingValues() != 2)
    return false;

  // Only trivially analyzable loops.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
    return false;

  // Require that the remainder is inside the loop.
  if (!L->contains(Rem))
    return false;

  // This only works if the remainder amount is a loop invariant.
  if (!L->isLoopInvariant(RemAmt))
    return false;

  // Is the PHI a loop increment?
  auto LoopIncrInfo = getIVIncrement(PN, LI);
  if (!LoopIncrInfo)
    return false;

  // We need remainder_amount % increment_amount to be zero. Increment of one
  // satisfies that without any special logic and is overwhelmingly the common
  // case.
  if (!match(LoopIncrInfo->second, m_One()))
    return false;

  // Need the increment to not overflow.
  if (!match(LoopIncrInfo->first, m_c_NUWAdd(m_Specific(PN), m_Value())))
    return false;

  // Set output variables.
  RemAmtOut = RemAmt;
  LoopIncrPNOut = PN;
  AddInstOut = AddInst;
  AddOffsetOut = AddOffset;

  return true;
}
// Try to transform:
//
// for(i = Start; i < End; ++i)
//   Rem = (i nuw+ IncrLoopInvariant) u% RemAmtLoopInvariant;
//
// ->
//
// Rem = (Start nuw+ IncrLoopInvariant) % RemAmtLoopInvariant;
// for(i = Start; i < End; ++i, ++rem)
//   Rem = rem == RemAmtLoopInvariant ? 0 : Rem;
static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
                                    const LoopInfo *LI,
                                    SmallSet<BasicBlock *, 32> &FreshBBs,
                                    bool IsHuge) {
  Value *AddOffset, *RemAmt, *AddInst;
  PHINode *LoopIncrPN;
  if (!isRemOfLoopIncrementWithLoopInvariant(Rem, LI, RemAmt, AddInst,
                                             AddOffset, LoopIncrPN))
    return false;

  // Only handle the non-constant remainder case, as the extra IV is probably
  // not profitable otherwise.
  //
  // Potential TODO(1): `urem` of a const ends up as `mul` + `shift` + `add`. If
  // we can rule out register pressure and ensure this `urem` is executed each
  // iteration, it's probably profitable to handle the const case as well.
  //
  // Potential TODO(2): Should we have a check for how "nested" this remainder
  // operation is? The new code runs every iteration so if the remainder is
  // guarded behind unlikely conditions this might not be worth it.
  if (match(RemAmt, m_ImmConstant()))
    return false;

  Loop *L = LI->getLoopFor(LoopIncrPN->getParent());
  Value *Start = LoopIncrPN->getIncomingValueForBlock(L->getLoopPreheader());
  // If we have an add, create the initial value for the remainder.
  // The logic here is:
  // (urem (add nuw Start, IncrLoopInvariant), RemAmtLoopInvariant
  //
  // Only proceed if the expression simplifies (otherwise we can't fully
  // optimize out the urem).
  if (AddInst) {
    assert(AddOffset && "We found an add but missing values");
    // Without dom-condition/assumption cache we aren't likely to get much out
    // of a context instruction.
    Start = simplifyAddInst(Start, AddOffset,
                            match(AddInst, m_NSWAdd(m_Value(), m_Value())),
                            /*IsNUW=*/true, *DL);
    if (!Start)
      return false;
  }

  // If we can't fully optimize out the `rem`, skip this transform.
  Start = simplifyURemInst(Start, RemAmt, *DL);
  if (!Start)
    return false;

  // Create new remainder with induction variable.
  Type *Ty = Rem->getType();
  IRBuilder<> Builder(Rem->getContext());

  Builder.SetInsertPoint(LoopIncrPN);
  PHINode *NewRem = Builder.CreatePHI(Ty, 2);

  Builder.SetInsertPoint(cast<Instruction>(
      LoopIncrPN->getIncomingValueForBlock(L->getLoopLatch())));
  // `(add (urem x, y), 1)` is always nuw.
  Value *RemAdd = Builder.CreateNUWAdd(NewRem, ConstantInt::get(Ty, 1));
  Value *RemCmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, RemAdd, RemAmt);
  Value *RemSel =
      Builder.CreateSelect(RemCmp, Constant::getNullValue(Ty), RemAdd);

  NewRem->addIncoming(Start, L->getLoopPreheader());
  NewRem->addIncoming(RemSel, L->getLoopLatch());

  // Insert all touched BBs.
  FreshBBs.insert(LoopIncrPN->getParent());
  FreshBBs.insert(L->getLoopLatch());
  FreshBBs.insert(Rem->getParent());
  if (AddInst)
    FreshBBs.insert(cast<Instruction>(AddInst)->getParent());
  replaceAllUsesWith(Rem, NewRem, FreshBBs, IsHuge);
  Rem->eraseFromParent();
  if (AddInst && AddInst->use_empty())
    cast<Instruction>(AddInst)->eraseFromParent();
  return true;
}
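// Illustrative sketch (added commentary; value and block names are made up) of
// the recurrence built above, for a loop with preheader %ph and latch %latch:
//   %rem.iv  = phi i32 [ %start.rem, %ph ], [ %rem.sel, %latch ]
//   ...
//   %rem.add = add nuw i32 %rem.iv, 1
//   %rem.cmp = icmp eq i32 %rem.add, %remamt
//   %rem.sel = select i1 %rem.cmp, i32 0, i32 %rem.add
// Every use of the original urem is then replaced by %rem.iv.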
bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
  if (foldURemOfLoopIncrement(Rem, DL, LI, FreshBBs, IsHugeFunc))
    return true;
  return false;
}
/// Some targets have better codegen for `ctpop(X) u< 2` than `ctpop(X) == 1`.
/// This function converts `ctpop(X) ==/!= 1` into `ctpop(X) u</u> 2/1` if the
/// result cannot be zero.
static bool adjustIsPower2Test(CmpInst *Cmp, const TargetLowering &TLI,
                               const TargetTransformInfo &TTI,
                               const DataLayout &DL) {
  ICmpInst::Predicate Pred;
  if (!match(Cmp, m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(), m_One())))
    return false;
  if (!ICmpInst::isEquality(Pred))
    return false;
  auto *II = cast<IntrinsicInst>(Cmp->getOperand(0));

  if (isKnownNonZero(II, DL)) {
    if (Pred == ICmpInst::ICMP_EQ) {
      Cmp->setOperand(1, ConstantInt::get(II->getType(), 2));
      Cmp->setPredicate(ICmpInst::ICMP_ULT);
    } else {
      Cmp->setPredicate(ICmpInst::ICMP_UGT);
    }
    return true;
  }

  return false;
}
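// Illustrative example (added commentary): when %x is known to be non-zero,
//   %pop = call i32 @llvm.ctpop.i32(i32 %x)
//   %is.pow2 = icmp eq i32 %pop, 1
// becomes
//   %is.pow2 = icmp ult i32 %pop, 2
// and the `icmp ne %pop, 1` form becomes `icmp ugt i32 %pop, 1`.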
bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
  if (sinkCmpExpression(Cmp, *TLI))
    return true;

  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
    return true;

  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
    return true;

  if (foldICmpWithDominatingICmp(Cmp, *TLI))
    return true;

  if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
    return true;

  if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
    return true;

  if (adjustIsPower2Test(Cmp, *TLI, *TTI, *DL))
    return true;

  return false;
}
/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void)InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase register
  // pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink 'and' feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  LLVM_DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd = BinaryOperator::Create(
        Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
        InsertPt->getIterator());
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    LLVM_DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}
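// Illustrative example (added commentary; names are made up): a pattern such
// as
//   bb0:  %m = and i64 %x, 255
//   bb1:  %c = icmp eq i64 %m, 0
// is duplicated so that the 'and' sits in the same block as its icmp-with-zero
// user, letting isel fold the pair into a single test-style instruction on
// targets where isMaskAndCmp0FoldingBeneficial() returns true.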
/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}
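// Illustrative note (added commentary): the mask test above accepts constants
// such as 0xFF or 0xFFFF, since imm & (imm + 1) == 0 exactly when imm keeps
// only a contiguous run of low bits, which is the shape a bit-extract
// instruction can produce directly.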
/// Sink both shift and truncate instruction to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  auto *TruncI = cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.
    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift.
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
      InsertedShift->insertBefore(*TruncUserBB, InsertPt);

      // Sink the trunc.
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      // It will go ahead of any debug-info.
      TruncInsertPt.setHeadBit(true);
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "");
      InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
      InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());

      MadeChange = true;
    }

    // Replace a use of the trunc with a use of the new trunc.
    TruncTheUse = InsertedTrunc;
  }
  return MadeChange;
}
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate BitExtract
/// instruction. It will only be applied if the architecture supports a
/// BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instruction are in the same BB. The use of
      // the truncate(TruncUse) may still introduce another truncate if not
      // legal. In this case, we would like to sink both shift and truncate
      // instruction to the BB of TruncUse.
      // for example:
      // BB1:
      // i64 shift.result = lshr i64 opnd, imm
      // trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture does
      //   not have i16 compare.
      // cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) &&
          shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->insertBefore(*UserBB, InsertPt);
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, or there are none, nuke the shift.
  if (ShiftI->use_empty()) {
    salvageDebugInfo(*ShiftI);
    ShiftI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  LoopInfo &LI,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL, ModifyDT &ModifiedDT,
                                  SmallSet<BasicBlock *, 32> &FreshBBs,
                                  bool IsHugeFunc) {
  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  Type *Ty = CountZeros->getType();
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
  unsigned SizeInBits = Ty->getScalarSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // Bail if the value is never zero.
  Use &Op = CountZeros->getOperandUse(0);
  if (isKnownNonZero(Op, *DL))
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
  if (IsHugeFunc)
    FreshBBs.insert(CallBlock);

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
  // Any debug-info after CountZeros should not be included.
  SplitPt.setHeadBit(true);
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
  if (IsHugeFunc)
    FreshBBs.insert(EndBlock);

  // Update the LoopInfo. The new blocks are in the same loop as the start
  // block.
  if (Loop *L = LI.getLoopFor(StartBlock)) {
    L->addBasicBlockToLoop(CallBlock, LI);
    L->addBasicBlockToLoop(EndBlock, LI);
  }

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  // Avoid introducing branch on poison. This also replaces the ctz operand.
  if (!isGuaranteedNotToBeUndefOrPoison(Op))
    Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(EndBlock, EndBlock->begin());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = ModifyDT::ModifyBBDT;
  return true;
}
bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expession, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (CI->isInlineAsm()) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize;
  Align PrefAlign;
  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->args()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if (!isAligned(PrefAlign, Offset2))
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
  }
  // If this is a memcpy (or similar) then we may be able to improve the
  // alignment.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
    MaybeAlign MIDestAlign = MI->getDestAlign();
    if (!MIDestAlign || DestAlign > *MIDestAlign)
      MI->setDestAlignment(DestAlign);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      MaybeAlign MTISrcAlign = MTI->getSourceAlign();
      Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
      if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
        MTI->setSourceAlignment(SrcAlign);
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data
  if (CI->hasFnAttr(Attribute::Cold) &&
      !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
    for (auto &Arg : CI->args()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
        return true;
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
      llvm_unreachable("llvm.assume should have been removed already");
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
      // paths and merge blocks before going into block-local instruction
      // selection.
      if (II->use_empty()) {
        II->eraseFromParent();
        return true;
      }
      Constant *RetVal = ConstantInt::getTrue(II->getContext());
      resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
      });
      return true;
    }
    case Intrinsic::objectsize:
      llvm_unreachable("llvm.objectsize.* should have been lowered already");
    case Intrinsic::is_constant:
      llvm_unreachable("llvm.is.constant.* should have been lowered already");
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }

    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group: {
      Value *ArgVal = II->getArgOperand(0);
      auto it = LargeOffsetGEPMap.find(II);
      if (it != LargeOffsetGEPMap.end()) {
        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
        // Make sure not to have to deal with iterator invalidation
        // after possibly adding ArgVal to LargeOffsetGEPMap.
        auto GEPs = std::move(it->second);
        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
        LargeOffsetGEPMap.erase(II);
      }

      replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
      II->eraseFromParent();
      return true;
    }
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
                                   IsHugeFunc);
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      return optimizeFunnelShift(II);
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_value:
      return fixupDbgValue(II);
    case Intrinsic::masked_gather:
      return optimizeGatherScatterInst(II, II->getArgOperand(0));
    case Intrinsic::masked_scatter:
      return optimizeGatherScatterInst(II, II->getArgOperand(1));
    }

    SmallVector<Value *, 2> PtrOps;
    Type *AccessTy;
    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty()) {
        Value *PtrVal = PtrOps.pop_back_val();
        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
          return true;
      }
  }

  // From here on out we're working with named functions.
  auto *Callee = CI->getCalledFunction();
  if (!Callee)
    return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  IRBuilder<> Builder(CI);
  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
    replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
    CI->eraseFromParent();
    return true;
  }

  // SCCP may have propagated, among other things, C++ static variables across
  // calls. If this happens to be the case, we may want to undo it in order to
  // avoid redundant pointer computation of the constant, as the function method
  // returning the constant needs to be executed anyways.
  auto GetUniformReturnValue = [](const Function *F) -> GlobalVariable * {
    if (!F->getReturnType()->isPointerTy())
      return nullptr;

    GlobalVariable *UniformValue = nullptr;
    for (auto &BB : *F) {
      if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
        if (auto *V = dyn_cast<GlobalVariable>(RI->getReturnValue())) {
          if (!UniformValue)
            UniformValue = V;
          else if (V != UniformValue)
            return nullptr;
        } else {
          return nullptr;
        }
      }
    }

    return UniformValue;
  };

  if (Callee->hasExactDefinition()) {
    if (GlobalVariable *RV = GetUniformReturnValue(Callee)) {
      bool MadeChange = false;
      for (Use &U : make_early_inc_range(RV->uses())) {
        auto *I = dyn_cast<Instruction>(U.getUser());
        if (!I || I->getParent() != CI->getParent()) {
          // Limit to the same basic block to avoid extending the call-site live
          // range, which otherwise could increase register pressure.
          continue;
        }
        if (CI->comesBefore(I)) {
          U.set(CI);
          MadeChange = true;
        }
      }

      return MadeChange;
    }
  }

  return false;
}
static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
                                          const CallInst *CI) {
  assert(CI && CI->use_empty());

  if (const auto *II = dyn_cast<IntrinsicInst>(CI))
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      return true;
    default:
      return false;
    }

  LibFunc LF;
  Function *Callee = CI->getCalledFunction();
  if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
    switch (LF) {
    case LibFunc_strcpy:
    case LibFunc_strncpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      return true;
    default:
      return false;
    }

  return false;
}
/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations. The case it is currently looking for is
/// the following one. Known intrinsics or library function that may be tail
/// called are taken into account as well.
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
                                                ModifyDT &ModifiedDT) {
  if (!BB->getTerminator())
    return false;

  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");

  PHINode *PN = nullptr;
  ExtractValueInst *EVI = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    EVI = dyn_cast<ExtractValueInst>(V);
    if (EVI) {
      V = EVI->getOperand(0);
      if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
        return false;
    }

    PN = dyn_cast<PHINode>(V);
  }

  if (PN && PN->getParent() != BB)
    return false;

  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
    const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
    if (BC && BC->hasOneUse())
      Inst = BC->user_back();

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      return II->getIntrinsicID() == Intrinsic::lifetime_end;
    return false;
  };

  SmallVector<const IntrinsicInst *, 4> FakeUses;

  auto isFakeUse = [&FakeUses](const Instruction *Inst) {
    if (auto *II = dyn_cast<IntrinsicInst>(Inst);
        II && II->getIntrinsicID() == Intrinsic::fake_use) {
      // Record the instruction so it can be preserved when the exit block is
      // removed. Do not preserve the fake use that uses the result of the
      // PHI instruction.
      // Do not copy fake uses that use the result of a PHI node.
      // FIXME: If we do want to copy the fake use into the return blocks, we
      // have to figure out which of the PHI node operands to use for each
      // copy.
      if (!isa<PHINode>(II->getOperand(0))) {
        FakeUses.push_back(II);
      }
      return true;
    }

    return false;
  };

  // Make sure there are no instructions between the first instruction
  // and return.
  const Instruction *BI = BB->getFirstNonPHI();
  // Skip over debug and the bitcast.
  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
         isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI) ||
         isFakeUse(BI))
    BI = BI->getNextNode();
  if (BI != RetI)
    return false;

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  const Function *F = BB->getParent();
  SmallVector<BasicBlock *, 4> TailCallBBs;
  // Record the call instructions so we can insert any fake uses
  // that need to be preserved before them.
  SmallVector<CallInst *, 4> CallInsts;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      // Look through bitcasts.
      Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
      CallInst *CI = dyn_cast<CallInst>(IncomingVal);
      BasicBlock *PredBB = PN->getIncomingBlock(I);
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI)) {
        TailCallBBs.push_back(PredBB);
        CallInsts.push_back(CI);
      } else {
        // Consider the cases in which the phi value is indirectly produced by
        // the tail call, for example when encountering memset(), memmove(),
        // strcpy(), whose return value may have been optimized out. In such
        // cases, the value needs to be the first function argument.
        //
        // bb0:
        //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
        //   br label %return
        // return:
        //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
        if (PredBB && PredBB->getSingleSuccessor() == BB)
          CI = dyn_cast_or_null<CallInst>(
              PredBB->getTerminator()->getPrevNonDebugInstruction(true));

        if (CI && CI->use_empty() &&
            isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
            IncomingVal == CI->getArgOperand(0) &&
            TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI)) {
          TailCallBBs.push_back(PredBB);
          CallInsts.push_back(CI);
        }
      }
    }
  } else {
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
    for (BasicBlock *Pred : predecessors(BB)) {
      if (!VisitedBBs.insert(Pred).second)
        continue;
      if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
        CallInst *CI = dyn_cast<CallInst>(I);
        if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI)) {
          // Either we return void or the return value must be the first
          // argument of a known intrinsic or library function.
          if (!V || isa<UndefValue>(V) ||
              (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
               V == CI->getArgOperand(0))) {
            TailCallBBs.push_back(Pred);
            CallInsts.push_back(CI);
          }
        }
      }
    }
  }

  bool Changed = false;
  for (auto const &TailCallBB : TailCallBBs) {
    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into TailCallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
    assert(!VerifyBFIUpdates ||
           BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
    BFI->setBlockFreq(BB,
                      (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
    ModifiedDT = ModifyDT::ModifyBBDT;
    Changed = true;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) {
    // Copy the fake uses found in the original return block to all blocks
    // that contain tail calls.
    for (auto *CI : CallInsts) {
      for (auto const *FakeUse : FakeUses) {
        auto *ClonedInst = FakeUse->clone();
        ClonedInst->insertBefore(CI);
      }
    }
    BB->eraseFromParent();
  }

  return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg = nullptr;
  Value *ScaledReg = nullptr;
  Value *OriginalValue = nullptr;
  bool InBounds = true;

  enum FieldName {
    NoField = 0x00,
    BaseRegField = 0x01,
    BaseGVField = 0x02,
    BaseOffsField = 0x04,
    ScaledRegField = 0x08,
    ScaleField = 0x10,
    MultipleFields = 0xff
  };

  ExtAddrMode() = default;

  void print(raw_ostream &OS) const;
  void dump() const;

  FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing types
    // is something we can't cope with later on.
    if (BaseReg && other.BaseReg &&
        BaseReg->getType() != other.BaseReg->getType())
      return MultipleFields;
    if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
      return MultipleFields;
    if (ScaledReg && other.ScaledReg &&
        ScaledReg->getType() != other.ScaledReg->getType())
      return MultipleFields;

    // Conservatively reject 'inbounds' mismatches.
    if (InBounds != other.InBounds)
      return MultipleFields;

    // Check each field to see if it differs.
    unsigned Result = NoField;
    if (BaseReg != other.BaseReg)
      Result |= BaseRegField;
    if (BaseGV != other.BaseGV)
      Result |= BaseGVField;
    if (BaseOffs != other.BaseOffs)
      Result |= BaseOffsField;
    if (ScaledReg != other.ScaledReg)
      Result |= ScaledRegField;
    // Don't count 0 as being a different scale, because that actually means
    // unscaled (which will already be counted by having no ScaledReg).
    if (Scale && other.Scale && Scale != other.Scale)
      Result |= ScaleField;

    if (llvm::popcount(Result) > 1)
      return MultipleFields;
    else
      return static_cast<FieldName>(Result);
  }

  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
  // register.
  bool isTrivial() {
    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
    // trivial if at most one of these terms is nonzero, except that BaseGV and
    // BaseReg both being zero actually means a null pointer value, which we
    // consider to be 'non-zero' here.
    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
  }

  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
    switch (Field) {
    default:
      return nullptr;
    case BaseRegField:
      return BaseReg;
    case BaseGVField:
      return BaseGV;
    case ScaledRegField:
      return ScaledReg;
    case BaseOffsField:
      return ConstantInt::get(IntPtrTy, BaseOffs);
    }
  }

  void SetCombinedField(FieldName Field, Value *V,
                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
    switch (Field) {
    default:
      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
      break;
    case ExtAddrMode::BaseRegField:
      BaseReg = V;
      break;
    case ExtAddrMode::BaseGVField:
      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
      // in the BaseReg field.
      assert(BaseReg == nullptr);
      BaseReg = V;
      BaseGV = nullptr;
      break;
    case ExtAddrMode::ScaledRegField:
      ScaledReg = V;
      // If we have a mix of scaled and unscaled addrmodes then we want scale
      // to be the scale and not zero.
      if (!Scale)
        for (const ExtAddrMode &AM : AddrModes)
          if (AM.Scale) {
            Scale = AM.Scale;
            break;
          }
      break;
    case ExtAddrMode::BaseOffsField:
      // The offset is no longer a constant, so it goes in ScaledReg with a
      // scale of 1.
      assert(ScaledReg == nullptr);
      ScaledReg = V;
      Scale = 1;
      BaseOffs = 0;
      break;
    }
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (InBounds)
    OS << "inbounds ";
  if (BaseGV) {
    OS << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "") << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "") << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

} // end anonymous namespace
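// Illustrative example for the ExtAddrMode struct above (added commentary;
// names are made up): for an address such as
//   getelementptr inbounds i32, ptr %base, i64 %idx
// the matched mode is roughly { BaseReg = %base, ScaledReg = %idx, Scale = 4,
// BaseOffs = 0 }, i.e. %base + 4 * %idx, and compare() reports which single
// field differs between two such modes when they are being combined.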
3225 /// This class provides transaction based operation on the IR.
3226 /// Every change made through this class is recorded in the internal state and
3227 /// can be undone (rollback) until commit is called.
3228 /// CGP does not check if instructions could be speculatively executed when
3229 /// moved. Preserving the original location would pessimize the debugging
3230 /// experience, as well as negatively impact the quality of sample PGO.
3231 class TypePromotionTransaction
{
3232 /// This represents the common interface of the individual transaction.
3233 /// Each class implements the logic for doing one specific modification on
3234 /// the IR via the TypePromotionTransaction.
3235 class TypePromotionAction
{
3237 /// The Instruction modified.
3241 /// Constructor of the action.
3242 /// The constructor performs the related action on the IR.
3243 TypePromotionAction(Instruction
*Inst
) : Inst(Inst
) {}
3245 virtual ~TypePromotionAction() = default;
3247 /// Undo the modification done by this action.
3248 /// When this method is called, the IR must be in the same state as it was
3249 /// before this action was applied.
3250 /// \pre Undoing the action works if and only if the IR is in the exact same
3251 /// state as it was directly after this action was applied.
3252 virtual void undo() = 0;
3254 /// Advocate every change made by this action.
3255 /// When the results on the IR of the action are to be kept, it is important
3256 /// to call this function, otherwise hidden information may be kept forever.
3257 virtual void commit() {
3258 // Nothing to be done, this action is not doing anything.
3262 /// Utility to remember the position of an instruction.
3263 class InsertionHandler
{
3264 /// Position of an instruction.
3265 /// Either an instruction:
3266 /// - Is the first in a basic block: BB is used.
3267 /// - Has a previous instruction: PrevInst is used.
3269 Instruction
*PrevInst
;
3272 std::optional
<DbgRecord::self_iterator
> BeforeDbgRecord
= std::nullopt
;
3274 /// Remember whether or not the instruction had a previous instruction.
3275 bool HasPrevInstruction
;
3278 /// Record the position of \p Inst.
3279 InsertionHandler(Instruction
*Inst
) {
3280 HasPrevInstruction
= (Inst
!= &*(Inst
->getParent()->begin()));
3281 BasicBlock
*BB
= Inst
->getParent();
3283 // Record where we would have to re-insert the instruction in the sequence
3284 // of DbgRecords, if we ended up reinserting.
3285 if (BB
->IsNewDbgInfoFormat
)
3286 BeforeDbgRecord
= Inst
->getDbgReinsertionPosition();
3288 if (HasPrevInstruction
) {
3289 Point
.PrevInst
= &*std::prev(Inst
->getIterator());
3295 /// Insert \p Inst at the recorded position.
3296 void insert(Instruction
*Inst
) {
3297 if (HasPrevInstruction
) {
3298 if (Inst
->getParent())
3299 Inst
->removeFromParent();
3300 Inst
->insertAfter(&*Point
.PrevInst
);
3302 BasicBlock::iterator Position
= Point
.BB
->getFirstInsertionPt();
3303 if (Inst
->getParent())
3304 Inst
->moveBefore(*Point
.BB
, Position
);
3306 Inst
->insertBefore(*Point
.BB
, Position
);
3309 Inst
->getParent()->reinsertInstInDbgRecords(Inst
, BeforeDbgRecord
);
  /// Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
                        << "\n");
      Inst->moveBefore(Before);
    }

    /// Move the instruction back to its original position.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };

  /// Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;

    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                        << "for:" << *Inst << "\n"
                        << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// Restore the original value of the instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                        << "for: " << *Inst << "\n"
                        << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };

  /// Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, PoisonValue::get(Val->getType()));
      }
    }

    /// Restore the original list of uses.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };
  /// Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Build a zero extension instruction.
  class ZExtBuilder : public TypePromotionAction {
    Value *Val;

  public:
    /// Build a zero extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// zext Opnd to Ty.
    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Builder.SetCurrentDebugLocation(DebugLoc());
      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
      LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
    }

    /// Get the built value.
    Value *getBuiltValue() { return Val; }

    /// Remove the built instruction.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
      if (Instruction *IVal = dyn_cast<Instruction>(Val))
        IVal->eraseFromParent();
    }
  };

  /// Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                        << "\n");
      Inst->mutateType(NewTy);
    }

    /// Mutate the instruction back to its original type.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                        << "\n");
      Inst->mutateType(OrigTy);
    }
  };
  /// Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;

      /// The index where this instruction is used for Inst.
      unsigned Idx;

      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    /// Keep track of the debug users.
    SmallVector<DbgValueInst *, 1> DbgValues;
    /// And non-instruction debug-users too.
    SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;

    /// Keep track of the new value so that we can undo it by replacing
    /// instances of the new value with the original value.
    Value *New;

    using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;

  public:
    /// Replace all the use of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New)
        : TypePromotionAction(Inst), New(New) {
      LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                        << "\n");
      // Record the original uses.
      for (Use &U : Inst->uses()) {
        Instruction *UserI = cast<Instruction>(U.getUser());
        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
      }
      // Record the debug uses separately. They are not in the instruction's
      // use list, but they are replaced by RAUW.
      findDbgValues(DbgValues, Inst, &DbgVariableRecords);

      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// Reassign the original uses of Inst to Inst.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (InstructionAndIdx &Use : OriginalUses)
        Use.Inst->setOperand(Use.Idx, Inst);
      // RAUW has replaced all original uses with references to the new value,
      // including the debug uses. Since we are undoing the replacements,
      // the original debug uses must also be reinstated to maintain the
      // correctness and utility of debug value instructions.
      for (auto *DVI : DbgValues)
        DVI->replaceVariableLocationOp(New, Inst);
      // Similar story with DbgVariableRecords, the non-instruction
      // representation of dbg.values.
      for (DbgVariableRecord *DVR : DbgVariableRecords)
        DVR->replaceVariableLocationOp(New, Inst);
    }
  };

  /// Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;

    /// Helper structure to hide all the link to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;

    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer = nullptr;

    /// Keep track of instructions removed.
    SetOfInstrs &RemovedInsts;

  public:
    /// Remove all reference of \p Inst and optionally replace all its
    /// uses with \p New.
    /// \p RemovedInsts Keep track of the instructions removed by this Action.
    /// \pre If !Inst->use_empty(), then New != nullptr
    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
                       Value *New = nullptr)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          RemovedInsts(RemovedInsts) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      RemovedInsts.insert(Inst);
      /// The instructions removed here will be freed after completing
      /// optimizeBlock() for all blocks as we need to keep track of the
      /// removed instructions during promotion.
      Inst->removeFromParent();
    }

    ~InstructionRemover() override { delete Replacer; }

    InstructionRemover &operator=(const InstructionRemover &other) = delete;
    InstructionRemover(const InstructionRemover &other) = delete;

    /// Resurrect the instruction and reassign it to the proper uses if
    /// new value was provided when building this action.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      RemovedInsts.erase(Inst);
    }
  };
public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  using ConstRestorationPt = const TypePromotionAction *;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}

  /// Advocate every change made in that transaction. Return true if any change
  /// happened.
  bool commit();

  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);

  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);

  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);

  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);

  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);

  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);

  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// @}

private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;

  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;

  SetOfInstrs &RemovedInsts;
};

} // end anonymous namespace
void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
      Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionRemover>(
          Inst, RemovedInsts, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}

Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}

bool TypePromotionTransaction::commit() {
  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
    Action->commit();
  bool Modified = !Actions.empty();
  Actions.clear();
  return Modified;
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
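
// Example (illustrative sketch, not invoked by the pass itself): a client
// records a restoration point, performs speculative rewrites through the
// transaction, and then either commits or rolls back. `SomeInst`, `WiderTy`
// and `Profitable` are hypothetical placeholders.
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.mutateType(SomeInst, WiderTy);   // speculative change, undoable
//   if (Profitable)
//     TPT.commit();                      // keep all recorded actions
//   else
//     TPT.rollback(LastKnownGood);       // undo actions made after the point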
/// A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction *> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;
  const LoopInfo &LI;
  const std::function<const DominatorTree &()> getDTFn;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;

  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;

  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  // A GEP which has too large offset to be folded into the addressing mode.
  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  /// True if we are optimizing for size.
  bool OptSize = false;

  ProfileSummaryInfo *PSI;
  BlockFrequencyInfo *BFI;

  AddressingModeMatcher(
      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
      const TargetRegisterInfo &TRI, const LoopInfo &LI,
      const std::function<const DominatorTree &()> getDTFn, Type *AT,
      unsigned AS, Instruction *MI, ExtAddrMode &AM,
      const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
      TypePromotionTransaction &TPT,
      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
        AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
        InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
        LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
    IgnoreProfitability = false;
  }

public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode
  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
        SmallVectorImpl<Instruction *> &AddrModeInsts,
        const TargetLowering &TLI, const LoopInfo &LI,
        const std::function<const DominatorTree &()> getDTFn,
        const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
        InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
                                         AccessTy, AS, MemoryInst, Result,
                                         InsertedInsts, PromotedInsts, TPT,
                                         LargeOffsetGEP, OptSize, PSI, BFI)
                       .matchAddr(V, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *Addr, unsigned Depth);
  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};
class PhiNodeSet;

/// An iterator for PhiNodeSet.
class PhiNodeSetIterator {
  PhiNodeSet *const Set;
  size_t CurrentIndex = 0;

public:
  /// The constructor. Start should point to either a valid element, or be equal
  /// to the size of the underlying SmallVector of the PhiNodeSet.
  PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
  PHINode *operator*() const;
  PhiNodeSetIterator &operator++();
  bool operator==(const PhiNodeSetIterator &RHS) const;
  bool operator!=(const PhiNodeSetIterator &RHS) const;
};

/// Keeps a set of PHINodes.
///
/// This is a minimal set implementation for a specific use case:
/// It is very fast when there are very few elements, but also provides good
/// performance when there are many. It is similar to SmallPtrSet, but also
/// provides iteration by insertion order, which is deterministic and stable
/// across runs. It is also similar to SmallSetVector, but provides removing
/// elements in O(1) time. This is achieved by not actually removing the element
/// from the underlying vector, so comes at the cost of using more memory, but
/// that is fine, since PhiNodeSets are used as short lived objects.
class PhiNodeSet {
  friend class PhiNodeSetIterator;

  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
  using iterator = PhiNodeSetIterator;

  /// Keeps the elements in the order of their insertion in the underlying
  /// vector. To achieve constant time removal, it never deletes any element.
  SmallVector<PHINode *, 32> NodeList;

  /// Keeps the elements in the underlying set implementation. This (and not the
  /// NodeList defined above) is the source of truth on whether an element
  /// is actually in the collection.
  MapType NodeMap;

  /// Points to the first valid (not deleted) element when the set is not empty
  /// and the value is not zero. Equals to the size of the underlying vector
  /// when the set is empty. When the value is 0, as in the beginning, the
  /// first element may or may not be valid.
  size_t FirstValidElement = 0;

public:
  /// Inserts a new element to the collection.
  /// \returns true if the element is actually added, i.e. was not in the
  /// collection before the operation.
  bool insert(PHINode *Ptr) {
    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
      NodeList.push_back(Ptr);
      return true;
    }
    return false;
  }

  /// Removes the element from the collection.
  /// \returns whether the element is actually removed, i.e. was in the
  /// collection before the operation.
  bool erase(PHINode *Ptr) {
    if (NodeMap.erase(Ptr)) {
      SkipRemovedElements(FirstValidElement);
      return true;
    }
    return false;
  }

  /// Removes all elements and clears the collection.
  void clear() {
    NodeMap.clear();
    NodeList.clear();
    FirstValidElement = 0;
  }

  /// \returns an iterator that will iterate the elements in the order of
  /// insertion.
  iterator begin() {
    if (FirstValidElement == 0)
      SkipRemovedElements(FirstValidElement);
    return PhiNodeSetIterator(this, FirstValidElement);
  }

  /// \returns an iterator that points to the end of the collection.
  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }

  /// Returns the number of elements in the collection.
  size_t size() const { return NodeMap.size(); }

  /// \returns 1 if the given element is in the collection, and 0 if otherwise.
  size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }

private:
  /// Updates the CurrentIndex so that it will point to a valid element.
  ///
  /// If the element of NodeList at CurrentIndex is valid, it does not
  /// change it. If there are no more valid elements, it updates CurrentIndex
  /// to point to the end of the NodeList.
  void SkipRemovedElements(size_t &CurrentIndex) {
    while (CurrentIndex < NodeList.size()) {
      auto it = NodeMap.find(NodeList[CurrentIndex]);
      // If the element has been deleted and added again later, NodeMap will
      // point to a different index, so CurrentIndex will still be invalid.
      if (it != NodeMap.end() && it->second == CurrentIndex)
        break;
      ++CurrentIndex;
    }
  }
};

PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
    : Set(Set), CurrentIndex(Start) {}

PHINode *PhiNodeSetIterator::operator*() const {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  return Set->NodeList[CurrentIndex];
}

PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  ++CurrentIndex;
  Set->SkipRemovedElements(CurrentIndex);
  return *this;
}

bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
  return CurrentIndex == RHS.CurrentIndex;
}

bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
  return !((*this) == RHS);
}
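
// Example (illustrative sketch): PhiNodeSet keeps insertion order for
// iteration while erase stays O(1), because erased nodes are only dropped
// from NodeMap and then skipped during iteration. `PN1`..`PN3` stand for
// arbitrary PHINode pointers.
//
//   PhiNodeSet Set;
//   Set.insert(PN1);
//   Set.insert(PN2);
//   Set.insert(PN3);
//   Set.erase(PN2);                          // constant time, no vector shuffle
//   for (auto It = Set.begin(); It != Set.end(); ++It)
//     dbgs() << **It << "\n";                // visits PN1 then PN3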
/// Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase phi node from this set
/// if it is simplified.
class SimplificationTracker {
  DenseMap<Value *, Value *> Storage;
  const SimplifyQuery &SQ;
  // Tracks newly created Phi nodes. The elements are iterated by insertion
  // order.
  PhiNodeSet AllPhiNodes;
  // Tracks newly created Select nodes.
  SmallPtrSet<SelectInst *, 32> AllSelectNodes;

public:
  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}

  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }

  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto *P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }

  void Put(Value *From, Value *To) { Storage.insert({From, To}); }

  void ReplacePhi(PHINode *From, PHINode *To) {
    Value *OldReplacement = Get(From);
    while (OldReplacement != From) {
      From = To;
      To = dyn_cast<PHINode>(OldReplacement);
      OldReplacement = Get(From);
    }
    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
    Put(From, To);
    From->replaceAllUsesWith(To);
    AllPhiNodes.erase(From);
    From->eraseFromParent();
  }

  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }

  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }

  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }

  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }

  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }

  void destroyNewNodes(Type *CommonType) {
    // For safe erasing, replace the uses with dummy value first.
    auto *Dummy = PoisonValue::get(CommonType);
    for (auto *I : AllPhiNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllPhiNodes.clear();
    for (auto *I : AllSelectNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllSelectNodes.clear();
  }
};
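
// Example (illustrative sketch): Get() follows the replacement chain recorded
// by Put(), so once A has been simplified to B and B to C, Get(A) yields C.
// `A`, `B` and `C` are placeholder Value pointers.
//
//   SimplificationTracker ST(SQ);
//   ST.Put(A, B);              // A was replaced by B
//   ST.Put(B, C);              // later, B itself was replaced by C
//   Value *Latest = ST.Get(A); // Latest == C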
/// A helper class for combining addressing modes.
class AddressingModeCombiner {
  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
  typedef std::pair<PHINode *, PHINode *> PHIPair;

private:
  /// The addressing modes we've collected.
  SmallVector<ExtAddrMode, 16> AddrModes;

  /// The field in which the AddrModes differ, when we have more than one.
  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;

  /// Are the AddrModes that we have all just equal to their original values?
  bool AllAddrModesTrivial = true;

  /// Common Type for all different fields in addressing modes.
  Type *CommonType = nullptr;

  /// SimplifyQuery for simplifyInstruction utility.
  const SimplifyQuery &SQ;

  /// Original Address.
  Value *Original;

  /// Common value among addresses
  Value *CommonValue = nullptr;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : SQ(_SQ), Original(OriginalValue) {}

  ~AddressingModeCombiner() { eraseCommonValueIfDead(); }

  /// Get the combined AddrMode
  const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }

  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of if we have any non-trivial AddrModes, as we need to detect
    // when all AddrModes are trivial as then we would introduce a phi or select
    // which just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scale reg is not null: we cannot handle it, because the merge of the
    // different offsets would have to be used as the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when GV is different and BaseReg installed,
    // because we want to use the base reg as a merge of the GV values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same we still need to collect it, because the
    // original value is different. And later we will need all original values
    // as anchors during finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }
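
  // Example (illustrative): two modes such as [BaseReg=%p1, BaseOffs=8] and
  // [BaseReg=%p2, BaseOffs=8] differ only in BaseReg, so both are kept and can
  // later be merged through a phi of %p1/%p2. Had Scale differed, or BaseOffs
  // differed while a ScaledReg was set, the new mode would have been rejected
  // above.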
  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // value of base register.
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }

private:
  /// `CommonValue` may be a placeholder inserted by us.
  /// If the placeholder is not used, we should remove this dead instruction.
  void eraseCommonValueIfDead() {
    if (CommonValue && CommonValue->getNumUses() == 0)
      if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
        CommonInst->eraseFromParent();
  }

  /// Initialize Map with anchor values. For each address seen
  /// we set the value of the differing field seen in this address.
  /// At the same time we find a common type for the different fields we will
  /// use to create new Phi/Select nodes. Keep it in the CommonType field.
  /// Return false if there is no common type found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace it
    // with constant null when we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *Type = DV->getType();
        if (CommonType && CommonType != Type)
          return false;
        CommonType = Type;
        Map[AM.OriginalValue] = DV;
      } else {
        NullValue.push_back(AM.OriginalValue);
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto *V : NullValue)
      Map[V] = Constant::getNullValue(CommonType);
    return true;
  }

  /// We have mapping between value A and other value B where B was a field in
  /// addressing mode represented by A. Also we have an original value C
  /// representing an address we start with. Traversing from C through phi and
  /// selects we ended up with A's in a map. This utility function tries to find
  /// a value V which is a field in addressing mode C and traversing through phi
  /// nodes and selects we will end up in corresponded values B in a map.
  /// The utility will create a new Phi/Selects if needed.
  // The simple example looks as follows:
  // BB1:
  //   p1 = b1 + 40
  //   br cond BB2, BB3
  // BB2:
  //   p2 = b2 + 40
  //   br BB3
  // BB3:
  //   p = phi [p1, BB1], [p2, BB2]
  //   v = load p
  // Map is
  //   p1 -> b1
  //   p2 -> b2
  // Request is
  //   p -> ?
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is because we will add new created Phi nodes in AddrToBase.
    // Simplification of Phi nodes is recursive, so some Phi node may
    // be simplified after we added it to AddrToBase. In reality this
    // simplification is possible only if the original phi/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ);

    // First step, DFS to create PHI nodes for all intermediate blocks.
    // Also fill traverse order for the second step.
    SmallVector<Value *, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, ST);

    // Second Step, fill new nodes by merged values and simplify if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    // Now we'd like to match New Phi nodes to existing ones.
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }
  /// Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({PHI, Candidate});
    SmallSet<PHINode *, 8> MatchedPHIs;
    MatchedPHIs.insert(PHI);
    WorkList.push_back({PHI, Candidate});
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values to Phi to compare them.
      // If values are different and both of them Phi and the first one is a
      // Phi we added (subject to match) and both of them is in the same basic
      // block then we can match our pair if values match. So we state that
      // these values match and add it to work list to verify that.
      for (auto *B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // One of them is not Phi, or
        // the first one is not a Phi node from the set we'd like to match, or
        // the Phi nodes are from different basic blocks; then
        // we will not be able to match.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({FirstPhi, SecondPhi}))
          continue;
        // So the values are different and do not match. So we need them to
        // match. (But we register no more than one match per PHI node, so that
        // we won't later try to replace them twice.)
        if (MatchedPHIs.insert(FirstPhi).second)
          Matcher.insert({FirstPhi, SecondPhi});
        // But we must check it.
        WorkList.push_back({FirstPhi, SecondPhi});
      }
    }
    return true;
  }
  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi is disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a deterministic
    // order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Add us, if no Phi nodes in the basic block we do not match.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we found equivalent or fail to do that.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        // Skip new Phi nodes.
        if (PhiNodesToMatch.count(&P))
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not match
        // later.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched)
          ST.ReplacePhi(MV.first, MV.second);
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in matcher. They will not match anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }
  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      Value *Current = TraverseOrder.pop_back_val();
      assert(Map.contains(Current) && "No node to fill!!!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue also must be Select.
        auto *CurrentSelect = cast<SelectInst>(Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.contains(TrueValue) && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.contains(FalseValue) && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseValue]));
      } else {
        // Must be a Phi node then.
        auto *PHI = cast<PHINode>(V);
        // Fill the Phi node with values from predecessors.
        for (auto *B : predecessors(PHI->getParent())) {
          Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
          assert(Map.contains(PV) && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[PV]), B);
        }
      }
      Map[Current] = ST.Simplify(V);
    }
  }

  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to known ending values represented in a map. For each traversed
  /// phi/select inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which values have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = PoisonValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      // If it is already visited or it is an ending value then skip it.
      if (Map.contains(Current))
        continue;
      TraverseOrder.push_back(Current);

      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
        // Is it OK to get metadata from OrigSelect?!
        // Create a Select placeholder with dummy value.
        SelectInst *Select =
            SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
                               CurrentSelect->getName(),
                               CurrentSelect->getIterator(), CurrentSelect);
        Map[Current] = Select;
        ST.insertNewSelect(Select);
        // We are interested in True and False values.
        Worklist.push_back(CurrentSelect->getTrueValue());
        Worklist.push_back(CurrentSelect->getFalseValue());
      } else {
        // It must be a Phi node then.
        PHINode *CurrentPhi = cast<PHINode>(Current);
        unsigned PredCount = CurrentPhi->getNumIncomingValues();
        PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi",
                                       CurrentPhi->getIterator());
        Map[Current] = PHI;
        ST.insertNewPhi(PHI);
        append_range(Worklist, CurrentPhi->incoming_values());
      }
    }
  }

  bool addrModeCombiningAllowed() {
    if (DisableComplexAddrModes)
      return false;
    switch (DifferentField) {
    default:
      return false;
    case ExtAddrMode::BaseRegField:
      return AddrSinkCombineBaseReg;
    case ExtAddrMode::BaseGVField:
      return AddrSinkCombineBaseGV;
    case ExtAddrMode::BaseOffsField:
      return AddrSinkCombineBaseOffs;
    case ExtAddrMode::ScaledRegField:
      return AddrSinkCombineScaledReg;
    }
  }
};

} // end anonymous namespace
/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode. If we found available IV increment, do not
  // go any further: we can reuse it and cannot eliminate it.
  ConstantInt *CI = nullptr;
  Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
      !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
    TestAddrMode.InBounds = false;
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
    // Restore status quo.
    TestAddrMode = AddrMode;
  }

  // If this is an add recurrence with a constant step, return the increment
  // instruction and the canonicalized step.
  auto GetConstantStep =
      [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
    auto *PN = dyn_cast<PHINode>(V);
    if (!PN)
      return std::nullopt;
    auto IVInc = getIVIncrement(PN, &LI);
    if (!IVInc)
      return std::nullopt;
    // TODO: The result of the intrinsics above is two's complement. However
    // when the IV inc is expressed as add or sub, iv.next is potentially a
    // poison value. If it has nuw or nsw flags, we need to make sure that these
    // flags are inferrable at the point of the memory instruction. Otherwise we
    // are replacing a well-defined two's complement computation with poison.
    // Currently, to avoid the potentially complex analysis needed to prove
    // this, we reject such cases.
    if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
      if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
        return std::nullopt;
    if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
      return std::make_pair(IVInc->first, ConstantStep->getValue());
    return std::nullopt;
  };

  // Try to account for the following special case:
  // 1. ScaleReg is an inductive variable;
  // 2. We use it with non-zero offset;
  // 3. IV's increment is available at the point of memory instruction.
  //
  // In this case, we may reuse the IV increment instead of the IV Phi to
  // achieve the following advantages:
  // 1. If IV step matches the offset, we will have no need in the offset;
  // 2. Even if they don't match, we will reduce the overlap of living IV
  //    and IV increment, that will potentially lead to better register
  //    assignment.
  if (AddrMode.BaseOffs) {
    if (auto IVStep = GetConstantStep(ScaleReg)) {
      Instruction *IVInc = IVStep->first;
      // The following assert is important to ensure a lack of infinite loops.
      // This transform is (intentionally) the inverse of the one just above.
      // If they don't agree on the definition of an increment, we'd alternate
      // back and forth indefinitely.
      assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
      APInt Step = IVStep->second;
      APInt Offset = Step * AddrMode.Scale;
      if (Offset.isSignedIntN(64)) {
        TestAddrMode.InBounds = false;
        TestAddrMode.ScaledReg = IVInc;
        TestAddrMode.BaseOffs -= Offset.getLimitedValue();
        // If this addressing mode is legal, commit it..
        // (Note that we defer the (expensive) domtree base legality check
        // to the very last possible point.)
        if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
            getDTFn().dominates(IVInc, MemoryInst)) {
          AddrModeInsts.push_back(cast<Instruction>(IVInc));
          AddrMode = TestAddrMode;
          return true;
        }
        // Restore status quo.
        TestAddrMode = AddrMode;
      }
    }
  }

  // Otherwise, just return what we have.
  return true;
}
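
// Example (illustrative): with AddrMode currently [%base + %x*4] and ScaleReg
// defined as "%x = add i64 %y, 3", the X+C block above retries the mode as
// [%base + 12 + %y*4] and commits it if the target reports that form as legal.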
/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isIntOrPtrTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
namespace {

/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
    if (It != PromotedInsts.end()) {
      // If the new extension is same as original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // Now the new extension is different from old extension, we make
      // the type information invalid by setting extension type to
      // BothExtension.
      ExtTy = BothExtension;
    }
    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
  }

  /// Utility function to query the original type of instruction \p Opnd
  /// with a matched extension type. If the extension doesn't match, we
  /// cannot use the information we had on the original type.
  /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
    if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
      return It->second.getPointer();
    return nullptr;
  }

  /// Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);

  /// Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }

  /// Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);

  /// Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);

  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }

  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }

public:
  /// Type for the utility function that promotes the operand of Ext.
  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
                            InstrToOrigTy &PromotedInsts,
                            unsigned &CreatedInstsCost,
                            SmallVectorImpl<Instruction *> *Exts,
                            SmallVectorImpl<Instruction *> *Truncs,
                            const TargetLowering &TLI);

  /// Given a sign/zero extend instruction \p Ext, return the appropriate
  /// action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

} // end anonymous namespace
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
    if (isa<OverflowingBinaryOperator>(BinOp) &&
        ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
         (IsSExt && BinOp->hasNoSignedWrap())))
      return true;

  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
  if ((Inst->getOpcode() == Instruction::And ||
       Inst->getOpcode() == Instruction::Or))
    return true;

  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
  if (Inst->getOpcode() == Instruction::Xor) {
    // Make sure it is not a NOT.
    if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
      if (!Cst->getValue().isAllOnes())
        return true;
  }

  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
    return true;

  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
    const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
    if (ExtInst->hasOneUse()) {
      const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
      if (AndInst && AndInst->getOpcode() == Instruction::And) {
        const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
        if (Cst &&
            Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
          return true;
      }
    }
  }

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constant but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops extended bits of the same kind of
  // extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (OpndType)
    ;
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}
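
// Example (illustrative): the trunc handling above lets a pattern such as
//   %w = sext i16 %v to i32
//   %t = trunc i32 %w to i16
//   %e = sext i16 %t to i64
// be treated as promotable, because the trunc only drops bits that the outer
// sext would recreate from the same sign-extended source.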
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through it. If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}
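
// Illustrative sketch (added comment, not from the original source): the
// trunc/any-ext handler selected above rewrites patterns such as
//   %t = trunc i32 %x to i8
//   %e = sext i8 %t to i32
// so that the extension applies directly to %x; when the resulting extension
// becomes a no-op (same source and destination type) it is erased entirely.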
Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}
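
// Illustrative sketch (added comment, not from the original source): for a
// "regular" operand the promotion below turns, e.g.,
//   %a = add nsw i8 %x, %y
//   %e = sext i8 %a to i32
// into
//   %xe = sext i8 %x to i32
//   %ye = sext i8 %y to i32
//   %a  = add nsw i32 %xe, %ye
// with every rewrite recorded in the transaction so it can be rolled back if
// the promotion turns out to be unprofitable.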
Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      // Insert it just after the definition.
      ITrunc->moveAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous call
    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.

  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are sign extended bits.
  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      LLVM_DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValue are typed, so we have to statically sign extend them.
    if (isa<UndefValue>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    Value *ValForExtOpnd = IsSExt
                               ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
                               : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
    TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
    Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
    if (!InstForExtOpnd)
      continue;

    if (Exts)
      Exts->push_back(InstForExtOpnd);

    CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
  }
  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
  TPT.eraseInstruction(Ext);
  return ExtOpnd;
}
/// Check whether or not promoting an instruction to a wider type is profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
                    << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}
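
// Worked example for the check above (hypothetical numbers): if the promotion
// created one non-free extension (NewCost = 1) while the original extension
// plus two newly folded instructions give OldCost = 1 + 2 = 3, then
// NewCost < OldCost and the promotion is considered profitable.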
/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5)
    return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
        // Don't touch identity bitcasts. These were probably put here by LSR,
        // and we don't want to mess around with them. Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS =
        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in one operand, then the other. If so, we
    // win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    // Try to match an integer constant second to increase its chance of ending
    // up in `BaseOffs`, resp. decrease its chance of ending up in `BaseReg`.
    int First = 0, Second = 1;
    if (isa<ConstantInt>(AddrInst->getOperand(First)) &&
        !isa<ConstantInt>(AddrInst->getOperand(Second)))
      std::swap(First, Second);
    AddrMode.InBounds = false;
    if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
        matchAddr(AddrInst->getOperand(Second), Depth + 1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive. Try merging operands in the opposite
    // order.
    if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
        matchAddr(AddrInst->getOperand(First), Depth + 1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  // case Instruction::Or:
  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    AddrMode.InBounds = false;
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS || RHS->getBitWidth() > 64)
      return false;
    int64_t Scale = Opcode == Instruction::Shl
                        ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
                        : RHS->getSExtValue();

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
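  // Illustrative note (added comment, not from the original source): the GEP
  // case below decomposes the address into the ExtAddrMode form
  //   BaseGV + BaseReg + Scale * ScaledReg + BaseOffs
  // e.g. `getelementptr i32, ptr %p, i64 %i` would ideally end up as
  // BaseReg = %p, ScaledReg = %i, Scale = 4, BaseOffs = 0.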
  case Instruction::GetElementPtr: {
    // Scan the GEP. We can handle it if it contains constant offsets and at
    // most one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        TypeSize TS = GTI.getSequentialElementStride(DL);
        if (TS.isNonZero()) {
          // The optimisations below currently only work for fixed offsets.
          if (TS.isScalable())
            return false;
          int64_t TypeSize = TS.getFixedValue();
          if (ConstantInt *CI =
                  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
            const APInt &CVal = CI->getValue();
            if (CVal.getSignificantBits() <= 64) {
              ConstantOffset += CVal.getSExtValue() * TypeSize;
              continue;
            }
          }
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
        if (!cast<GEPOperator>(AddrInst)->isInBounds())
          AddrMode.InBounds = false;
        return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;

      if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
          TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
          ConstantOffset > 0) {
        // Record GEPs with non-zero offsets as candidates for splitting in
        // the event that the offset cannot fit into the r+i addressing mode.
        // Simple and common case that only one GEP is used in calculating the
        // address for the memory access.
        Value *Base = AddrInst->getOperand(0);
        auto *BaseI = dyn_cast<Instruction>(Base);
        auto *GEP = cast<GetElementPtrInst>(AddrInst);
        if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
            (BaseI && !isa<CastInst>(BaseI) &&
             !isa<GetElementPtrInst>(BaseI))) {
          // Make sure the parent block allows inserting non-PHI instructions
          // before the terminator.
          BasicBlock *Parent = BaseI ? BaseI->getParent()
                                     : &GEP->getFunction()->getEntryBlock();
          if (!Parent->getTerminator()->isEHPad())
            LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
        }
      }

      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;
    if (!cast<GEPOperator>(AddrInst)->isInBounds())
      AddrMode.InBounds = false;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
    //
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
      if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
        GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
        if (TLI.addressingModeSupportsTLS(GV))
          return matchAddr(AddrInst->getOperand(0), Depth);
      }
    }
    break;
  }
  return false;
}
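
// Illustrative note (added comment, not from the original source): matchAddr()
// below tries the cheap slots first. A constant such as 40 is preferably
// folded into BaseOffs and a global into BaseGV; only if that fails do we
// consume the BaseReg ([reg]) or ScaledReg ([reg + reg]) slots, re-checking
// legality with TLI.isLegalAddressingMode() after each attempt.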
/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    if (CI->getValue().isSignedIntN(64)) {
      // Fold in immediates if legal for the target.
      AddrMode.BaseOffs += CI->getSExtValue();
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseOffs -= CI->getSExtValue();
    }
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this. Check to see if it is actually
      // *profitable* to do so. We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }

  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}
/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);

  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it! TODO: Also handle C_Address?
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add accessed addresses and types to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep chain
    // of users. This avoids excessive compilation times in pathological cases.
    if (SeenInsts++ >= MaxAddressUsersToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back({&U, LI->getType()});
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      if (CI->hasFnAttr(Attribute::Cold)) {
        // If this is a cold call, we can sink the addressing calculation into
        // the cold path. See optimizeCallInst
        if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
          continue;
      }

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
      if (!IA)
        return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                          PSI, BFI, SeenInsts))
      return true;
  }

  return false;
}

static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
    ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  unsigned SeenInsts = 0;
  SmallPtrSet<Instruction *, 16> ConsideredInsts;
  return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                           PSI, BFI, SeenInsts);
}
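
// Illustrative note (added comment, not from the original source): a use that
// stores the address itself as data, e.g. `store ptr %addr, ptr %slot`, is not
// a memory use *of* the addressing computation, so the walk above reports it
// as non-foldable and the caller gives up on sinking.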
/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val))
    return true;

  // If Val is a constant sized alloca in the entry block, it is live, this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}
/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
    Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
  if (IgnoreProfitability)
    return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded. Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.) In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
  if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonable small and fast way to
  // compute an effective address. (i.e LEA on x86)
  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
  for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
    Value *Address = Pair.first->get();
    Instruction *UserI = cast<Instruction>(Pair.first->getUser());
    Type *AddressAccessTy = Pair.second;
    unsigned AS = Address->getType()->getPointerAddressSpace();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
                                  AddressAccessTy, AS, UserI, Result,
                                  InsertedInsts, PromotedInsts, TPT,
                                  LargeOffsetGEP, OptSize, PSI, BFI);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability, the changes made are not
    // part of the original matcher. Therefore, they should be dropped
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}
/// Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}
/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure. The need for the
/// register pressure constraint means this can end up being an all or nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value *, 8> worklist;
  SmallPtrSet<Value *, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing mode obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction *, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, Addr);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.pop_back_val();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop we ensure that traversing through
    // Phi nodes ends up with all cases to compute address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in address computation represented
    // as Phi. So we can safely sink address computation to memory instruction.
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      append_range(worklist, P->incoming_values());
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    // Defer the query (and possible computation of) the dom tree to point of
    // actual use. It's expected that most address matches don't actually need
    // the domtree.
    auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
      Function *F = MemoryInst->getParent()->getParent();
      return this->getDT(*F);
    };
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
        *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
        BFI);

    GetElementPtrInst *GEP = LargeOffsetGEP.first;
    if (GEP && !NewGEPBases.count(GEP)) {
      // If splitting the underlying data structure can reduce the offset of a
      // GEP, collect the GEP. Skip the GEPs that are the new bases of
      // previously split data structures.
      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
      LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
    }

    NewAddrMode.OriginalValue = V;
    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  bool Modified = TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is not local definitely, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return Modified;
  }
  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block. If so,
  // reuse the computation. Before attempting reuse, check if the address is
  // valid as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
  if (SunkAddr) {
    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType()) {
      if (SunkAddr->getType()->getPointerAddressSpace() !=
              Addr->getType()->getPointerAddressSpace() &&
          !DL->isNonIntegralPointerType(Addr->getType())) {
        // There are two reasons the address spaces might not match: a no-op
        // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
        // ptrtoint/inttoptr pair to ensure we match the original semantics.
        // TODO: allow bitcast between different address space pointers with the
        // same size.
        SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
        SunkAddr =
            Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
      } else
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
                                   SubtargetInfo->addrSinkUsingGEPs())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return Modified;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return Modified;
    }

    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      if (ResultPtr)
        return Modified;

      if (BaseGV->isThreadLocal()) {
        ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        ResultPtr = BaseGV;
      }
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
        !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return Modified;
    } else {
      Type *I8PtrTy =
          Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());

      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                     cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                           AddrMode.InBounds);
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                        AddrMode.InBounds);
      }

      if (SunkAddr->getType() != Addr->getType()) {
        if (SunkAddr->getType()->getPointerAddressSpace() !=
                Addr->getType()->getPointerAddressSpace() &&
            !DL->isNonIntegralPointerType(Addr->getType())) {
          // There are two reasons the address spaces might not match: a no-op
          // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
          // ptrtoint/inttoptr pair to ensure we match the original semantics.
          // TODO: allow bitcast between different address space pointers with
          // the same size.
          SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
          SunkAddr =
              Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
        } else
          SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
      }
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return Modified;

    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return Modified;
      }

      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      Value *BaseGVPtr;
      if (BaseGV->isThreadLocal()) {
        BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        BaseGVPtr = BaseGV;
      }
      Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }
  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case we reused a
  // value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
      RecursivelyDeleteTriviallyDeadInstructions(
          Repl, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
    });
  }
  return true;
}
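
// Illustrative sketch (added comment, not from the original source): with
// AddrMode = { BaseReg = %p, ScaledReg = %i, Scale = 4, BaseOffs = 16 } the
// GEP-based path above materializes, right before MemoryInst, roughly
//   %sunkaddr = mul i64 %i, 4
//   %sunkaddr1 = getelementptr i8, ptr %p, i64 %sunkaddr
//   %sunkaddr2 = getelementptr i8, ptr %sunkaddr1, i64 16
// so SelectionDAG can fold the whole computation into the memory access.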
/// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
/// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
/// only handle a 2 operand GEP in the same basic block or a splat constant
/// vector. The 2 operands to the GEP must have a scalar pointer and a vector
/// index.
///
/// If the existing GEP has a vector base pointer that is splat, we can look
/// through the splat to find the scalar pointer. If we can't find a scalar
/// pointer there's nothing we can do.
///
/// If we have a GEP with more than 2 indices where the middle indices are all
/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
///
/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
/// followed by a GEP with an all zeroes vector index. This will enable
/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
/// scalar offset.
bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
                                               Value *Ptr) {
  Value *NewAddr;

  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    // Don't optimize GEPs that don't have indices.
    if (!GEP->hasIndices())
      return false;

    // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
    // FIXME: We should support this by sinking the GEP.
    if (MemoryInst->getParent() != GEP->getParent())
      return false;

    SmallVector<Value *, 2> Ops(GEP->operands());

    bool RewriteGEP = false;

    if (Ops[0]->getType()->isVectorTy()) {
      Ops[0] = getSplatValue(Ops[0]);
      if (!Ops[0])
        return false;
      RewriteGEP = true;
    }

    unsigned FinalIndex = Ops.size() - 1;

    // Ensure all but the last index is 0.
    // FIXME: This isn't strictly required. All that's required is that they are
    // all scalars or splats.
    for (unsigned i = 1; i < FinalIndex; ++i) {
      auto *C = dyn_cast<Constant>(Ops[i]);
      if (!C)
        return false;
      if (isa<VectorType>(C->getType()))
        C = C->getSplatValue();
      auto *CI = dyn_cast_or_null<ConstantInt>(C);
      if (!CI || !CI->isZero())
        return false;
      // Scalarize the index if needed.
      Ops[i] = CI;
    }

    // Try to scalarize the final index.
    if (Ops[FinalIndex]->getType()->isVectorTy()) {
      if (Value *V = getSplatValue(Ops[FinalIndex])) {
        auto *C = dyn_cast<ConstantInt>(V);
        // Don't scalarize all zeros vector.
        if (!C || !C->isZero()) {
          Ops[FinalIndex] = V;
          RewriteGEP = true;
        }
      }
    }

    // If we made any changes or we have extra operands, we need to generate
    // new instructions.
    if (!RewriteGEP && Ops.size() == 2)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    Type *SourceTy = GEP->getSourceElementType();
    Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());

    // If the final index isn't a vector, emit a scalar GEP containing all ops
    // and a vector GEP with all zeroes final index.
    if (!Ops[FinalIndex]->getType()->isVectorTy()) {
      NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
      auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
      auto *SecondTy = GetElementPtrInst::getIndexedType(
          SourceTy, ArrayRef(Ops).drop_front());
      NewAddr =
          Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
    } else {
      Value *Base = Ops[0];
      Value *Index = Ops[FinalIndex];

      // Create a scalar GEP if there are more than 2 operands.
      if (Ops.size() != 2) {
        // Replace the last index with 0.
        Ops[FinalIndex] =
            Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
        Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
        SourceTy = GetElementPtrInst::getIndexedType(
            SourceTy, ArrayRef(Ops).drop_front());
      }

      // Now create the GEP with scalar pointer and vector index.
      NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
    }
  } else if (!isa<Constant>(Ptr)) {
    // Not a GEP, maybe it's a splat and we can create a GEP to enable
    // SelectionDAGBuilder to use it as a uniform base.
    Value *V = getSplatValue(Ptr);
    if (!V)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    // Emit a vector GEP with a scalar pointer and all 0s vector index.
    Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
    auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
    Type *ScalarTy;
    if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
        Intrinsic::masked_gather) {
      ScalarTy = MemoryInst->getType()->getScalarType();
    } else {
      assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
             Intrinsic::masked_scatter);
      ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
    }
    NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
  } else {
    // Constant, SelectionDAGBuilder knows to check if it's a splat.
    return false;
  }

  MemoryInst->replaceUsesOfWith(Ptr, NewAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Ptr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(
        Ptr, TLInfo, nullptr,
        [&](Value *V) { removeAllAssertingVHReferences(V); });

  return true;
}
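
// Illustrative sketch (added comment, not from the original source): a
// masked.gather whose pointer operand is a splat of a single scalar pointer
// %base can be rewritten to use
//   %uniform = getelementptr T, ptr %base, <N x i64> zeroinitializer
// i.e. a GEP with a scalar base and an all-zero vector index, which
// SelectionDAGBuilder recognizes as a uniform base.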
/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computing into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, *CS);
  unsigned ArgNo = 0;
  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    // TODO: Also handle C_Address?
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}
/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}
/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save the extensions profitably moved up in \p
/// ProfitablyMovedExts. When some promotion happened, \p TPT contains the
/// proper state to revert them.
///
/// \return true if some promotion happened, false otherwise.
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto *I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load because in such case the extension can be moved
    // up without any promotion on its operands.
    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We would be able to merge only one extension in a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too. Also avoid replacing a
    // single free extension with multiple extensions, as this increases the
    // number of IR instructions while not providing any savings.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
         (ExtCost == 0 && NewExts.size() > 1))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto *ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      ProfitablyMovedExts.push_back(MovedExt);
      NewPromoted = true;
    }

    // If none of the speculative promotions for NewExts is profitable, rollback
    // and save the current extension (I) as the last profitable extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}
/// Merging redundant sexts when one is dominating the other.
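/// For example (illustrative), given two identical extensions of %x where the
/// first dominates the second,
///   %s1 = sext i32 %x to i64
///   ...
///   %s2 = sext i32 %x to i64
/// the uses of %s2 are rewritten to use %s1 and %s2 is removed.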
bool CodeGenPrepare::mergeSExts(Function &F) {
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (getDT(F).dominates(Inst, Pt)) {
          replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!getDT(F).dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          break;
        replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}
// Splitting large data structures so that the GEPs accessing them can have
// smaller offsets so that they can be sunk to the same blocks as their users.
// For example, a large struct starting from %base is split into two parts
// where the second part starts from %new_base.
//
// Before:
// BB0:
//   %base     =
//
// BB1:
//   %gep0     = gep %base, off0
//   %gep1     = gep %base, off1
//   %gep2     = gep %base, off2
//
// BB2:
//   %load1    = load %gep0
//   %load2    = load %gep1
//   %load3    = load %gep2
//
// After:
// BB0:
//   %base     =
//   %new_base = gep %base, off0
//
// BB1:
//   %new_gep0 = %new_base
//   %new_gep1 = gep %new_base, off1 - off0
//   %new_gep2 = gep %new_base, off2 - off0
//
// BB2:
//   %load1    = load i32, i32* %new_gep0
//   %load2    = load i32, i32* %new_gep1
//   %load3    = load i32, i32* %new_gep2
//
// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
// their offsets are small enough to fit into the addressing mode.
bool CodeGenPrepare::splitLargeGEPOffsets() {
  bool Changed = false;
  for (auto &Entry : LargeOffsetGEPMap) {
    Value *OldBase = Entry.first;
    SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
        &LargeOffsetGEPs = Entry.second;
    auto compareGEPOffset =
        [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
            const std::pair<GetElementPtrInst *, int64_t> &RHS) {
          if (LHS.first == RHS.first)
            return false;
          if (LHS.second != RHS.second)
            return LHS.second < RHS.second;
          return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
        };
    // Sorting all the GEPs of the same data structures based on the offsets.
    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
    LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
    // Skip if all the GEPs have the same offsets.
    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
      continue;
    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
    Value *NewBaseGEP = nullptr;

    auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
                             GetElementPtrInst *GEP) {
      LLVMContext &Ctx = GEP->getContext();
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());
      Type *I8PtrTy =
          PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());

      BasicBlock::iterator NewBaseInsertPt;
      BasicBlock *NewBaseInsertBB;
      if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
        // If the base of the struct is an instruction, the new base will be
        // inserted close to it.
        NewBaseInsertBB = BaseI->getParent();
        if (isa<PHINode>(BaseI))
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
          NewBaseInsertBB =
              SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        } else
          NewBaseInsertPt = std::next(BaseI->getIterator());
      } else {
        // If the current base is an argument or global value, the new base
        // will be inserted to the entry block.
        NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
        NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
      }
      IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
      // Create a new base.
      Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
      NewBaseGEP = OldBase;
      if (NewBaseGEP->getType() != I8PtrTy)
        NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
      NewBaseGEP =
          NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
      NewGEPBases.insert(NewBaseGEP);
    };

    // Check whether all the offsets can be encoded with preferred common base.
    if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
            LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
      BaseOffset = PreferBase;
      // Create a new base if the offset of the BaseGEP can be decoded with one
      // instruction.
      createNewBase(BaseOffset, OldBase, BaseGEP);
    }

    auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
      GetElementPtrInst *GEP = LargeOffsetGEP->first;
      int64_t Offset = LargeOffsetGEP->second;
      if (Offset != BaseOffset) {
        TargetLowering::AddrMode AddrMode;
        AddrMode.HasBaseReg = true;
        AddrMode.BaseOffs = Offset - BaseOffset;
        // The result type of the GEP might not be the type of the memory
        // access.
        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
                                        GEP->getResultElementType(),
                                        GEP->getAddressSpace())) {
          // We need to create a new base if the offset to the current base is
          // too large to fit into the addressing mode. So, a very large struct
          // may be split into several parts.
          BaseGEP = GEP;
          BaseOffset = Offset;
          NewBaseGEP = nullptr;
        }
      }

      // Generate a new GEP to replace the current one.
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());

      if (!NewBaseGEP) {
        // Create a new base if we don't have one yet. Find the insertion
        // pointer for the new base first.
        createNewBase(BaseOffset, OldBase, GEP);
      }

      IRBuilder<> Builder(GEP);
      Value *NewGEP = NewBaseGEP;
      if (Offset != BaseOffset) {
        // Calculate the new offset for the new GEP.
        Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
        NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
      }
      replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
      LargeOffsetGEPID.erase(GEP);
      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
      GEP->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}
bool CodeGenPrepare::optimizePhiType(
    PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
    SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
  // We are looking for a collection of interconnected phi nodes that together
  // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
  // are of the same type. Convert the whole set of nodes to the type of the
  // bitcast.
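  // For example (illustrative), a float phi whose inputs and outputs only
  // flow through i32 bitcasts:
  //   %c = bitcast i32 %l to float
  //   %p = phi float [ %c, ... ], [ %p2, ... ]
  //   %d = bitcast float %p to i32
  // can be rewritten as an i32 phi, removing both bitcasts.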
  Type *PhiTy = I->getType();
  Type *ConvertTy = nullptr;
  if (Visited.count(I) ||
      (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
    return false;

  SmallVector<Instruction *, 4> Worklist;
  Worklist.push_back(cast<Instruction>(I));
  SmallPtrSet<PHINode *, 4> PhiNodes;
  SmallPtrSet<ConstantData *, 4> Constants;
  PhiNodes.insert(I);
  Visited.insert(I);
  SmallPtrSet<Instruction *, 4> Defs;
  SmallPtrSet<Instruction *, 4> Uses;
  // This works by adding extra bitcasts between load/stores and removing
  // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
  // we can get in the situation where we remove a bitcast in one iteration
  // just to add it again in the next. We need to ensure that at least one
  // bitcast we remove is anchored to something that will not change back.
  bool AnyAnchored = false;

  while (!Worklist.empty()) {
    Instruction *II = Worklist.pop_back_val();

    if (auto *Phi = dyn_cast<PHINode>(II)) {
      // Handle Defs, which might also be PHI's
      for (Value *V : Phi->incoming_values()) {
        if (auto *OpPhi = dyn_cast<PHINode>(V)) {
          if (!PhiNodes.count(OpPhi)) {
            if (!Visited.insert(OpPhi).second)
              return false;
            PhiNodes.insert(OpPhi);
            Worklist.push_back(OpPhi);
          }
        } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
          if (!OpLoad->isSimple())
            return false;
          if (Defs.insert(OpLoad).second)
            Worklist.push_back(OpLoad);
        } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
          if (Defs.insert(OpEx).second)
            Worklist.push_back(OpEx);
        } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
          if (!ConvertTy)
            ConvertTy = OpBC->getOperand(0)->getType();
          if (OpBC->getOperand(0)->getType() != ConvertTy)
            return false;
          if (Defs.insert(OpBC).second) {
            Worklist.push_back(OpBC);
            AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
                           !isa<ExtractElementInst>(OpBC->getOperand(0));
          }
        } else if (auto *OpC = dyn_cast<ConstantData>(V))
          Constants.insert(OpC);
        else
          return false;
      }
    }

    // Handle uses which might also be phi's
    for (User *V : II->users()) {
      if (auto *OpPhi = dyn_cast<PHINode>(V)) {
        if (!PhiNodes.count(OpPhi)) {
          if (Visited.count(OpPhi))
            return false;
          PhiNodes.insert(OpPhi);
          Visited.insert(OpPhi);
          Worklist.push_back(OpPhi);
        }
      } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
        if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
          return false;
        Uses.insert(OpStore);
      } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
        if (!ConvertTy)
          ConvertTy = OpBC->getType();
        if (OpBC->getType() != ConvertTy)
          return false;
        Uses.insert(OpBC);
        AnyAnchored |=
            any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
      } else {
        return false;
      }
    }
  }

  if (!ConvertTy || !AnyAnchored ||
      !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
    return false;

  LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
                    << *ConvertTy << "\n");

  // Create all the new phi nodes of the new type, and bitcast any loads to the
  // correct type.
  ValueToValueMap ValMap;
  for (ConstantData *C : Constants)
    ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
  for (Instruction *D : Defs) {
    if (isa<BitCastInst>(D)) {
      ValMap[D] = D->getOperand(0);
      DeletedInstrs.insert(D);
    } else {
      BasicBlock::iterator insertPt = std::next(D->getIterator());
      ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
    }
  }
  for (PHINode *Phi : PhiNodes)
    ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
                                  Phi->getName() + ".tc", Phi->getIterator());
  // Pipe together all the PhiNodes.
  for (PHINode *Phi : PhiNodes) {
    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
    for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
      NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
                          Phi->getIncomingBlock(i));
    Visited.insert(NewPhi);
  }
  // And finally pipe up the stores and bitcasts
  for (Instruction *U : Uses) {
    if (isa<BitCastInst>(U)) {
      DeletedInstrs.insert(U);
      replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
    } else {
      U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
                                       U->getIterator()));
    }
  }

  // Save the removed phis to be deleted later.
  for (PHINode *Phi : PhiNodes)
    DeletedInstrs.insert(Phi);
  return true;
}
bool CodeGenPrepare::optimizePhiTypes(Function &F) {
  if (!OptimizePhiTypes)
    return false;

  bool Changed = false;
  SmallPtrSet<PHINode *, 4> Visited;
  SmallPtrSet<Instruction *, 4> DeletedInstrs;

  // Attempt to optimize all the phis in the function to the correct type.
  for (auto &BB : F)
    for (auto &Phi : BB.phis())
      Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);

  // Remove any old phi's that have been converted.
  for (auto *I : DeletedInstrs) {
    replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
    I->eraseFromParent();
  }

  return Changed;
}
/// Return true, if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}
/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
///
/// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used into memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operations for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows to form an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if known as considerable depending on targets.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}
// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension is just profitable.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto *I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happens.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header, keep the current chain
    // as unhandled. Defer to promote this until we encounter another SExt
    // chain derived from the same header.
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto *VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto *I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}
bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
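  // For example (illustrative), if %src below is also used in another block,
  //   %ext = zext i16 %src to i32
  // that remote use is rewritten to use "trunc i32 %ext to i16", so only %ext
  // has to be kept live across the block boundary.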
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB)
      continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB)
      continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock *, Instruction *> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB)
      continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "");
      InsertedTrunc->insertBefore(*UserBB, InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}
// Find loads whose uses only use some of the loaded value's bits.  Add an "and"
// just after the load if the target can fold this into one extload instruction,
// with the hope of eliminating some of the other later "and" instructions using
// the loaded value.  "and"s that are made trivially redundant by the insertion
// of the new "and" are removed by this function, while others (e.g. those whose
// path from the load goes through a phi) are left for isel to potentially
// fold.
//
// For example, two loads feeding a phi whose users only need the low byte:
//
// b0:
//   x1 = load i32
//   ...
// b1:
//   x2 = load i32
//   ...
// b2:
//   x = phi x1, x2
//   y = and x, 0xff
//
// becomes (after a call to optimizeLoadExt for each load):
//
// b0:
//   x1 = load i32
//   x1' = and x1, 0xff
//   ...
// b1:
//   x2 = load i32
//   x2' = and x2, 0xff
//   ...
// b2:
//   x = phi x1', x2'
//   y = and x, 0xff
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
  if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
    return false;

  // Skip loads we've already transformed.
  if (Load->hasOneUse() &&
      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
    return false;

  // Look at all uses of Load, looking through phis, to determine how many bits
  // of the loaded value are needed.
  SmallVector<Instruction *, 8> WorkList;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 8> AndsToMaybeRemove;
  for (auto *U : Load->users())
    WorkList.push_back(cast<Instruction>(U));

  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
  unsigned BitWidth = LoadResultVT.getSizeInBits();
  // If the BitWidth is 0, do not try to optimize the type
  if (BitWidth == 0)
    return false;

  APInt DemandBits(BitWidth, 0);
  APInt WidestAndBits(BitWidth, 0);

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();

    // Break use-def graph loops.
    if (!Visited.insert(I).second)
      continue;

    // For a PHI node, push all of its users.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      for (auto *U : Phi->users())
        WorkList.push_back(cast<Instruction>(U));
      continue;
    }

    switch (I->getOpcode()) {
    case Instruction::And: {
      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!AndC)
        return false;
      APInt AndBits = AndC->getValue();
      DemandBits |= AndBits;
      // Keep track of the widest and mask we see.
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      DemandBits.setLowBits(BitWidth - ShiftAmt);
      break;
    }

    case Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      DemandBits.setLowBits(TruncBitWidth);
      break;
    }

    default:
      return false;
    }
  }

  uint32_t ActiveBits = DemandBits.getActiveBits();
  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
  // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
  // followed by an AND.
  // TODO: Look into removing this restriction by fixing backends to either
  // return false for isLoadExtLegal for i1 or have them select this pattern to
  // a single instruction.
  //
  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
  // mask, since these are the only ands that will be removed by isel.
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
      WidestAndBits != DemandBits)
    return false;

  LLVMContext &Ctx = Load->getType()->getContext();
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);

  // Reject cases that won't be matched as extloads.
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

  IRBuilder<> Builder(Load->getNextNonDebugInstruction());
  auto *NewAnd = cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
      ++NumAndUses;
    }

  ++NumAndsAdded;
  return true;
}
/// Check if V (an operand of a select instruction) is an expensive instruction
/// that is only used once.
static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  // If it's safe to speculatively execute, then it should not have side
  // effects; therefore, it's safe to sink and possibly *not* execute.
  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
         TTI->isExpensiveToSpeculativelyExecute(I);
}

/// Returns true if a SelectInst should be turned into an explicit branch.
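/// For example (illustrative), a select whose branch weights are strongly
/// biased (say 1000:1) is treated as obviously predictable and is a good
/// candidate, provided the target reports predictable selects as expensive.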
static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
                                                const TargetLowering *TLI,
                                                SelectInst *SI) {
  // If even a predictable select is cheap, then a branch can't be cheaper.
  if (!TLI->isPredictableSelectExpensive())
    return false;

  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.

  // If metadata tells us that the select condition is obviously predictable,
  // then we want to replace the select with a branch.
  uint64_t TrueWeight, FalseWeight;
  if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;

    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
      if (Probability > TTI->getPredictableBranchThreshold())
        return true;
    }
  }

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  // If either operand of the select is expensive and only needed on one side
  // of the select, we should form a branch.
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
    return true;

  return false;
}
/// If \p isTrue is true, return the true value of \p SI, otherwise return
/// false value of \p SI. If the true/false value of \p SI is defined by any
/// select instructions in \p Selects, look through the defining select
/// instruction until the true/false value is not defined in \p Selects.
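/// For example (illustrative), with %s1 = select i1 %c, i32 %a, i32 %b and
/// %s2 = select i1 %c, i32 %s1, i32 %d both in \p Selects, the true value of
/// %s2 resolves through %s1 to %a.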
static Value *
getTrueOrFalseValue(SelectInst *SI, bool isTrue,
                    const SmallPtrSet<const Instruction *, 2> &Selects) {
  Value *V = nullptr;

  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }

  assert(V && "Failed to get select true/false value");
  return V;
}
bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
  assert(Shift->isShift() && "Expected a shift");

  // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
  // general vector shifts, and (3) the shift amount is a select-of-splatted
  // values, hoist the shifts before the select:
  //   shift Op0, (select Cond, TVal, FVal) -->
  //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
  //
  // This is inverting a generic IR transform when we know that the cost of a
  // general vector shift is more than the cost of 2 shift-by-scalars.
  // We can't do this effectively in SDAG because we may not be able to
  // determine if the select operands are splats from within a basic block.
  Type *Ty = Shift->getType();
  if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
    return false;
  Value *Cond, *TVal, *FVal;
  if (!match(Shift->getOperand(1),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Shift);
  BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
  Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
  Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
  Shift->eraseFromParent();
  return true;
}
bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
  Intrinsic::ID Opcode = Fsh->getIntrinsicID();
  assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
         "Expected a funnel shift");

  // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
  // than general vector shifts, and (3) the shift amount is select-of-splatted
  // values, hoist the funnel shifts before the select:
  //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
  //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
  //
  // This is inverting a generic IR transform when we know that the cost of a
  // general vector shift is more than the cost of 2 shift-by-scalars.
  // We can't do this effectively in SDAG because we may not be able to
  // determine if the select operands are splats from within a basic block.
  Type *Ty = Fsh->getType();
  if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
    return false;
  Value *Cond, *TVal, *FVal;
  if (!match(Fsh->getOperand(2),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Fsh);
  Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
  Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
  Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
  Fsh->eraseFromParent();
  return true;
}
/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  if (DisableSelectToBranch)
    return false;

  // If the SelectOptimize pass is enabled, selects have already been optimized.
  if (!getCGPassBuilderOption().DisableSelectOptimize)
    return false;

  // Find all consecutive select instructions that share the same condition.
  SmallVector<SelectInst *, 2> ASI;
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
    } else {
      break;
    }
  }

  SelectInst *LastSI = ASI.back();
  // Increment the current iterator to skip all the rest of select instructions
  // because they will be either "not lowered" or "all lowered" to branch.
  CurInstIterator = std::next(LastSI->getIterator());
  // Examine debug-info attached to the consecutive select instructions. They
  // won't be individually optimised by optimizeInst, so we need to perform
  // DbgVariableRecord maintenance here instead.
  for (SelectInst *SI : ArrayRef(ASI).drop_front())
    fixupDbgVariableRecordsOnInst(*SI);

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF ?
  if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  if (TLI->isSelectSupported(SelectKind) &&
      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) ||
       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
    return false;

  // The DominatorTree needs to be rebuilt by any consumers after this
  // transformation. We simply reset here rather than setting the ModifiedDT
  // flag to avoid restarting the function walk in runOnFunction for each
  // select optimized.
  DT.reset();

  // Transform a sequence like this:
  //    %cmp = cmp uge i32 %a, %b
  //    %sel = select i1 %cmp, i32 %c, i32 %d
  //
  // Into:
  //    %cmp = cmp uge i32 %a, %b
  //    %cmp.frozen = freeze %cmp
  //    br i1 %cmp.frozen, label %select.true, label %select.false
  //  select.true:
  //    br label %select.end
  //  select.false:
  //    br label %select.end
  //  select.end:
  //    %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
  //
  // %cmp should be frozen, otherwise it may introduce undefined behavior.
  // In addition, we may sink instructions that produce %c or %d from
  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunken instruction, that
  // block and its branch may be optimized away. In that case, one side of the
  // first branch will point directly to select.end, and the corresponding PHI
  // predecessor block will be the start block.

  // Collect values that go on the true side and the values that go on the false
  // side.
  SmallVector<Instruction *> TrueInstrs, FalseInstrs;
  for (SelectInst *SI : ASI) {
    if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
      TrueInstrs.push_back(cast<Instruction>(V));
    if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
      FalseInstrs.push_back(cast<Instruction>(V));
  }

  // Split the select block, according to how many (if any) values go on each
  // side.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
  // We should split before any debug-info.
  SplitPt.setHeadBit(true);

  IRBuilder<> IB(SI);
  auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");

  BasicBlock *TrueBlock = nullptr;
  BasicBlock *FalseBlock = nullptr;
  BasicBlock *EndBlock = nullptr;
  BranchInst *TrueBranch = nullptr;
  BranchInst *FalseBranch = nullptr;
  if (TrueInstrs.size() == 0) {
    FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
  } else if (FalseInstrs.size() == 0) {
    TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    TrueBlock = TrueBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  } else {
    Instruction *ThenTerm = nullptr;
    Instruction *ElseTerm = nullptr;
    SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
                                  nullptr, nullptr, LI);
    TrueBranch = cast<BranchInst>(ThenTerm);
    FalseBranch = cast<BranchInst>(ElseTerm);
    TrueBlock = TrueBranch->getParent();
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  }

  EndBlock->setName("select.end");
  if (TrueBlock)
    TrueBlock->setName("select.true.sink");
  if (FalseBlock)
    FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
                                                : "select.false.sink");

  if (IsHugeFunc) {
    if (TrueBlock)
      FreshBBs.insert(TrueBlock);
    if (FalseBlock)
      FreshBBs.insert(FalseBlock);
    FreshBBs.insert(EndBlock);
  }

  BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));

  static const unsigned MD[] = {
      LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
      LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
  StartBlock->getTerminator()->copyMetadata(*SI, MD);

  // Sink expensive instructions into the conditional blocks to avoid executing
  // them speculatively.
  for (Instruction *I : TrueInstrs)
    I->moveBefore(TrueBranch);
  for (Instruction *I : FalseInstrs)
    I->moveBefore(FalseBranch);

  // If we did not create a new block for one of the 'true' or 'false' paths
  // of the condition, it means that side of the branch goes to the end block
  // directly and the path originates from the start block from the point of
  // view of the new PHI.
  if (TrueBlock == nullptr)
    TrueBlock = StartBlock;
  else if (FalseBlock == nullptr)
    FalseBlock = StartBlock;

  SmallPtrSet<const Instruction *, 2> INS;
  INS.insert(ASI.begin(), ASI.end());
  // Use reverse iterator because later select may use the value of the
  // earlier select, and we need to propagate value through earlier select
  // to get the PHI operand.
  for (SelectInst *SI : llvm::reverse(ASI)) {
    // The select itself is replaced with a PHI Node.
    PHINode *PN = PHINode::Create(SI->getType(), 2, "");
    PN->insertBefore(EndBlock->begin());
    PN->takeName(SI);
    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
    PN->setDebugLoc(SI->getDebugLoc());

    replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
    SI->eraseFromParent();
    INS.erase(SI);
    ++NumSelectsExpanded;
  }

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  return true;
}
/// Some targets only accept certain types for splat inputs. For example a VDUP
/// in MVE takes a GPR (integer) register, and the instruction that incorporates
/// a VDUP (such as a VADD qd, qm, rm) also requires a GPR register.
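/// For example (illustrative), a float splat built as
///   %i = insertelement <4 x float> poison, float %f, i64 0
///   %s = shufflevector <4 x float> %i, <4 x float> poison, <4 x i32> zeroinitializer
/// can be rebuilt as bitcast(splat(bitcast %f to i32)) when the target asks
/// for an integer splat type via shouldConvertSplatType.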
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
  if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                            m_Undef(), m_ZeroMask())))
    return false;
  Type *NewType = TLI->shouldConvertSplatType(SVI);
  if (!NewType)
    return false;

  auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
  assert(!NewType->isVectorTy() && "Expected a scalar type!");
  assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
         "Expected a type of the same size!");
  auto *NewVecType =
      FixedVectorType::get(NewType, SVIVecType->getNumElements());

  // Create a bitcast (shuffle (insert (bitcast(..))))
  IRBuilder<> Builder(SVI->getContext());
  Builder.SetInsertPoint(SVI);
  Value *BC1 = Builder.CreateBitCast(
      cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
  Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
  Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);

  replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      SVI, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });

  // Also hoist the bitcast up to its operand if they are not in the same
  // block.
  if (auto *BCI = dyn_cast<Instruction>(BC1))
    if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
      if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
          !Op->isTerminator() && !Op->isEHPad())
        BCI->moveAfter(Op);

  return true;
}
bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
  // If the operands of I can be folded into a target instruction together with
  // I, duplicate and sink them.
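  // For example (illustrative), on targets where a vector multiply can fold a
  // splatted operand, a shufflevector splat defined in another block is cloned
  // next to its user here so that instruction selection sees both together.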
  SmallVector<Use *, 4> OpsToSink;
  if (!TTI->isProfitableToSinkOperands(I, OpsToSink))
    return false;

  // OpsToSink can contain multiple uses in a use chain (e.g.
  // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
  // uses must come first, so we process the ops in reverse order so as to not
  // create invalid IR.
  BasicBlock *TargetBB = I->getParent();
  bool Changed = false;
  SmallVector<Use *, 4> ToReplace;
  Instruction *InsertPoint = I;
  DenseMap<const Instruction *, unsigned long> InstOrdering;
  unsigned long InstNumber = 0;
  for (const auto &I : *TargetBB)
    InstOrdering[&I] = InstNumber++;

  for (Use *U : reverse(OpsToSink)) {
    auto *UI = cast<Instruction>(U->get());
    if (isa<PHINode>(UI))
      continue;
    if (UI->getParent() == TargetBB) {
      if (InstOrdering[UI] < InstOrdering[InsertPoint])
        InsertPoint = UI;
      continue;
    }
    ToReplace.push_back(U);
  }

  SetVector<Instruction *> MaybeDead;
  DenseMap<Instruction *, Instruction *> NewInstructions;
  for (Use *U : ToReplace) {
    auto *UI = cast<Instruction>(U->get());
    Instruction *NI = UI->clone();

    if (IsHugeFunc) {
      // Now we clone an instruction, its operands' defs may sink to this BB
      // now. So we put the operands defs' BBs into FreshBBs to do optimization.
      for (Value *Op : NI->operands())
        if (auto *OpDef = dyn_cast<Instruction>(Op))
          FreshBBs.insert(OpDef->getParent());
    }

    NewInstructions[UI] = NI;
    MaybeDead.insert(UI);
    LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
    NI->insertBefore(InsertPoint);
    InsertPoint = NI;
    InsertedInsts.insert(NI);

    // Update the use for the new instruction, making sure that we update the
    // sunk instruction uses, if it is part of a chain that has already been
    // sunk.
    Instruction *OldI = cast<Instruction>(U->getUser());
    if (NewInstructions.count(OldI))
      NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
    else
      U->set(NI);
    Changed = true;
  }

  // Remove instructions that are dead after sinking.
  for (auto *I : MaybeDead) {
    if (!I->hasNUsesOrMore(1)) {
      LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
      I->eraseFromParent();
    }
  }

  return Changed;
}
bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  EVT OldVT = TLI->getValueType(*DL, OldType);
  MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
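  // For example (illustrative), with an i16 condition and a 64-bit preferred
  // register width, the condition is widened once:
  //   %wide = zext i16 %cond to i64
  //   switch i64 %wide, label %default [
  //     i64 1, label %bb1
  //     i64 2, label %bb2
  //   ]
  // instead of extending %cond separately for each case comparison.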
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Extend the switch condition and case constants using the target preferred
  // extend unless the switch condition is a function argument with an extend
  // attribute. In that case, we can avoid an unnecessary mask/extension by
  // matching the argument extension instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  // Some targets prefer SExt over ZExt.
  if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
    ExtType = Instruction::SExt;

  if (auto *Arg = dyn_cast<Argument>(Cond)) {
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;
    if (Arg->hasZExtAttr())
      ExtType = Instruction::ZExt;
  }

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  ExtInst->setDebugLoc(SI->getDebugLoc());
  SI->setCondition(ExtInst);
  for (auto Case : SI->cases()) {
    const APInt &NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt)
                          ? NarrowConst.zext(RegWidth)
                          : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}
bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
  // The SCCP optimization tends to produce code like this:
  //   switch(x) { case 42: phi(42, ...) }
  // Materializing the constant for the phi-argument needs instructions; So we
  // change the code to:
  //   switch(x) { case 42: phi(x, ...) }

  Value *Condition = SI->getCondition();
  // Avoid endless loop in degenerate case.
  if (isa<ConstantInt>(*Condition))
    return false;

  bool Changed = false;
  BasicBlock *SwitchBB = SI->getParent();
  Type *ConditionType = Condition->getType();

  for (const SwitchInst::CaseHandle &Case : SI->cases()) {
    ConstantInt *CaseValue = Case.getCaseValue();
    BasicBlock *CaseBB = Case.getCaseSuccessor();
    // Set to true if we previously checked that `CaseBB` is only reached by
    // a single case from this switch.
    bool CheckedForSinglePred = false;
    for (PHINode &PHI : CaseBB->phis()) {
      Type *PHIType = PHI.getType();
      // If ZExt is free then we can also catch patterns like this:
      //   switch((i32)x) { case 42: phi((i64)42, ...); }
      // and replace `(i64)42` with `zext i32 %x to i64`.
      bool TryZExt =
          PHIType->isIntegerTy() &&
          PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
          TLI->isZExtFree(ConditionType, PHIType);
      if (PHIType == ConditionType || TryZExt) {
        // Set to true to skip this case because of multiple preds.
        bool SkipCase = false;
        Value *Replacement = nullptr;
        for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
          Value *PHIValue = PHI.getIncomingValue(I);
          if (PHIValue != CaseValue) {
            if (!TryZExt)
              continue;
            ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
            if (!PHIValueInt ||
                PHIValueInt->getValue() !=
                    CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
              continue;
          }
          if (PHI.getIncomingBlock(I) != SwitchBB)
            continue;
          // We cannot optimize if there are multiple case labels jumping to
          // this block. This check may get expensive when there are many
          // case labels so we test for it last.
          if (!CheckedForSinglePred) {
            CheckedForSinglePred = true;
            if (SI->findCaseDest(CaseBB) == nullptr) {
              SkipCase = true;
              break;
            }
          }

          if (Replacement == nullptr) {
            if (PHIValue == CaseValue) {
              Replacement = Condition;
            } else {
              IRBuilder<> Builder(SI);
              Replacement = Builder.CreateZExt(Condition, PHIType);
            }
          }
          PHI.setIncomingValue(I, Replacement);
          Changed = true;
        }
        if (SkipCase)
          break;
      }
    }
  }
  return Changed;
}

bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  bool Changed = optimizeSwitchType(SI);
  Changed |= optimizeSwitchPhiConstants(SI);
  return Changed;
}
namespace {

/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move downward extractelement transition.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;

  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;

  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;

  /// Instruction that will be combined with the transition.
  Instruction *CombineInst = nullptr;

  /// The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// Promote \p ToBePromoted by moving \p Def downward through.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            ST->getAlign())) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    enum TargetTransformInfo::TargetCostKind CostKind =
        TargetTransformInfo::TCK_RecipThroughput;
    InstructionCost ScalarCost =
        TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
    InstructionCost VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
      if (IsArg0Constant)
        Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
      else
        Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;

      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               CostKind, Arg0Info, Arg1Info);
    }
    LLVM_DEBUG(
        dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
               << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
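  /// For example (illustrative), for a <4 x i32> transition extracting lane 2,
  /// calling this with Val == 7 and UseSplat == false produces
  /// <i32 undef, i32 undef, i32 7, i32 undef>, while UseSplat == true
  /// produces <i32 7, i32 7, i32 7, i32 7>.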
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
    if (UseSplat)
      return ConstantVector::getSplat(EC, Val);

    if (!EC.isScalable()) {
      SmallVector<Constant *, 4> ConstVec;
      UndefValue *UndefVal = UndefValue::get(Val->getType());
      for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
        if (Idx == ExtractIdx)
          ConstVec.push_back(Val);
        else
          ConstVec.push_back(UndefVal);
      }
      return ConstantVector::get(ConstVec);
    } else
      llvm_unreachable(
          "Generate scalable vector for non-splat is unimplemented");
  }

  /// Check if promoting to a vector type an operand at \p OperandIdx
  /// in \p Use can trigger undefined behavior.
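  /// For example (illustrative), widening the divisor operand of a udiv with
  /// an undef-padded vector constant could introduce a division by undef in
  /// lanes that the scalar code never computed.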
8015 static bool canCauseUndefinedBehavior(const Instruction
*Use
,
8016 unsigned OperandIdx
) {
8017 // This is not safe to introduce undef when the operand is on
8018 // the right hand side of a division-like instruction.
8019 if (OperandIdx
!= 1)
8021 switch (Use
->getOpcode()) {
8024 case Instruction::SDiv
:
8025 case Instruction::UDiv
:
8026 case Instruction::SRem
:
8027 case Instruction::URem
:
8029 case Instruction::FDiv
:
8030 case Instruction::FRem
:
8031 return !Use
->hasNoNaNs();
8033 llvm_unreachable(nullptr);
8037 VectorPromoteHelper(const DataLayout
&DL
, const TargetLowering
&TLI
,
8038 const TargetTransformInfo
&TTI
, Instruction
*Transition
,
8039 unsigned CombineCost
)
8040 : DL(DL
), TLI(TLI
), TTI(TTI
), Transition(Transition
),
8041 StoreExtractCombineCost(CombineCost
) {
8042 assert(Transition
&& "Do not know how to promote null");
8045 /// Check if we can promote \p ToBePromoted to \p Type.
8046 bool canPromote(const Instruction
*ToBePromoted
) const {
8047 // We could support CastInst too.
8048 return isa
<BinaryOperator
>(ToBePromoted
);
  /// Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }
  /// Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace
void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  //    operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this code?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}
/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it to a cheaper place,
  //      and we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
                        << ToBePromoted->getParent()->getName()
                        << ") than the transition (" << Parent->getName()
                        << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
                        << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    LLVM_DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}
/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
///            (shl (zext I to i64), 32)), addr)  -->
/// (store F, addr) and (store I, addr+4)
///
/// Similarly, splitting for other merged stores can also be beneficial, like:
/// For pair of {i32, i32}, i64 store --> two i32 stores.
/// For pair of {i32, i16}, i64 store --> two i32 stores.
/// For pair of {i16, i16}, i32 store --> two i16 stores.
/// For pair of {i16, i8},  i32 store --> two i16 stores.
/// For pair of {i8, i8},   i16 store --> two i8 stores.
///
/// We allow each target to determine specifically which kind of splitting is
/// supported.
///
/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo:
///
/// void goo(const std::pair<int, float> &);
/// hoo() {
///   ...
///   goo(std::make_pair(tmp, ftmp));
///   ...
/// }
///
/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
/// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();

  // The code below assumes shifting a value by <number of bits>,
  // whereas scalable vectors would have to be shifted by
  // <2log(vscale) + number of bits> in order to store the
  // low/high parts. Bailing out for now.
  if (StoreType->isScalableTy())
    return false;

  if (!DL.typeSizeEqualsStoreSize(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
    return false;

  // Don't split the store if it is volatile.
  if (SI.isVolatile())
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  // or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split store.
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  bool IsLE = SI.getDataLayout().isLittleEndian();
  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = SI.getPointerOperand();
    Align Alignment = SI.getAlign();
    const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
    if (IsOffsetStore) {
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));

      // When splitting the store in half, naturally one half will retain the
      // alignment of the original wider store, regardless of whether it was
      // over-aligned or not, while the other will require adjustment.
      Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
    }
    Builder.CreateAlignedStore(V, Addr, Alignment);
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}
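// Illustrative result (little-endian, an {i32, i32} pair merged into one i64
// store; hypothetical value names, not taken from a specific test):
//   store i64 %merged, ptr %addr
// becomes
//   store i32 %lo, ptr %addr
//   %addr.hi = getelementptr i32, ptr %addr, i32 1
//   store i32 %hi, ptr %addr.hi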
// Return true if the GEP has two operands, the first operand is of a sequential
// type, and the second operand is a constant.
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 && I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}
// Try unmerging GEPs to reduce liveness interference (register pressure) across
// IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
// reducing liveness interference across those edges benefits global register
// allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it is used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
//   ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      unmerging)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
//   ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
// merging.
//
// Note this unmerging may increase the length of the data-flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
// between the register pressure and the length of the data-flow critical
// path. Restricting this to the uncommon IndirectBr case would minimize the
// impact of a potentially longer critical path, if any, and the impact on
// compile time.
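// Concrete illustration (hypothetical indices): with Idx == 4 and UIdx == 12,
// %UGEPI is rewritten from `gep %GEPIOp, 12` to `gep %GEPI, 8`, so only %GEPI
// (and not %GEPIOp) has to stay live across the indirectbr edges.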
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
                         TargetTransformInfo::TCK_SizeAndLatency) >
      TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEP is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (llvm::none_of(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock) {
            return true;
          }
        }
        return false;
      }))
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to check if unmerging would make GEPIOp not
  // alive on IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI)
      continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that is fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp
    // is the pointer operand to it. If so, record it in the vector. If not,
    // give up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency) >
        TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    InstructionCost ImmCost = TTI->getIntImmCost(
        NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx = ConstantInt::get(
        GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(llvm::none_of(GEPIOp->users(),
                       [&](User *Usr) {
                         return cast<Instruction>(Usr)->getParent() != SrcBlock;
                       }) &&
         "GEPIOp is used outside SrcBlock");
  return true;
}
static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
                           SmallSet<BasicBlock *, 32> &FreshBBs,
                           bool IsHugeFunc) {
  // Try to convert
  //  %c = icmp ult %x, 8
  //  br %c, bla, blb
  //  %tc = lshr %x, 3
  // to
  //  %tc = lshr %x, 3
  //  %c = icmp eq %tc, 0
  //  br %c, bla, blb
  // Creating the cmp to zero can be better for the backend, especially if the
  // lshr produces flags that can be used automatically.
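  // A similar rewrite (illustrative sketch, not lifted from a test) applies to
  // equality compares against a constant when a matching add/sub of that
  // constant is nearby:
  //  %c = icmp eq i32 %x, 7      ; with %d = sub i32 %x, 7 also present
  // can instead branch on
  //  %c = icmp eq i32 %d, 0
  // so the existing subtraction result (and its flags) can be reused.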
  if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
    return false;

  ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
  if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
    return false;

  Value *X = Cmp->getOperand(0);
  APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();

  for (auto *U : X->users()) {
    Instruction *UI = dyn_cast<Instruction>(U);
    // A quick dominance check
    if (!UI ||
        (UI->getParent() != Branch->getParent() &&
         UI->getParent() != Branch->getSuccessor(0) &&
         UI->getParent() != Branch->getSuccessor(1)) ||
        (UI->getParent() != Branch->getParent() &&
         !UI->getParent()->getSinglePredecessor()))
      continue;

    if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
        match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
    if (Cmp->isEquality() &&
        (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
         match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
  }
  return false;
}
bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
  bool AnyChange = false;
  AnyChange = fixupDbgVariableRecordsOnInst(*I);

  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return AnyChange;

  // TODO: Move into the switch on opcode below here.
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
      LargeOffsetGEPMap.erase(P);
      replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return AnyChange;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return AnyChange;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
         isa<TruncInst>(I)) &&
        TLI->optimizeExtendOrTruncateConversion(
            I, LI->getLoopFor(I->getParent()), *TTI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        if (TLI->optimizeExtendOrTruncateConversion(
                I, LI->getLoopFor(I->getParent()), *TTI))
          return true;

        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return AnyChange;
  }
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (match(I, m_URem(m_Value(), m_Value())))
    if (optimizeURem(I))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
      sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
    return true;

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI->getIterator());
      NC->setDebugLoc(GEPI->getDebugLoc());
      replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
      RecursivelyDeleteTriviallyDeadInstructions(
          GEPI, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
  }
  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ? F : nullptr;

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
        FI->eraseFromParent();
        return true;
      }
    }
    return AnyChange;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  case Instruction::Br:
    return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
  }

  return AnyChange;
}
/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
  bool MadeChange = false;

  do {
    CurInstIterator = BB.begin();
    ModifiedDT = ModifyDT::NotModifyDT;
    while (CurInstIterator != BB.end()) {
      MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
      if (ModifiedDT != ModifyDT::NotModifyDT) {
        // For huge functions we tend to quickly go through the inner
        // optimization opportunities in the BB, so we go back to the BB head
        // to re-optimize each instruction instead of going back to the
        // function head.
        if (IsHugeFunc) {
          DT.reset();
          getDT(*BB.getParent());
          break;
        } else {
          return true;
        }
      }
    }
  } while (ModifiedDT == ModifyDT::ModifyInstDT);

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
  return MadeChange;
}
// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
                                     DVI.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the
      // best opportunity to be accurately lowered. This update may change the
      // type of pointer being referred to; however this makes no difference
      // to debugging information, and we can't generate bitcasts that may
      // affect codegen.
      DVI.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}
bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
  bool AnyChange = false;
  for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
    AnyChange |= fixupDbgVariableRecord(DVR);
  return AnyChange;
}
// FIXME: should updating debug-info really cause the "changed" flag to fire,
// which can cause a function to be reprocessed?
bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
  if (DVR.Type != DbgVariableRecord::LocationType::Value &&
      DVR.Type != DbgVariableRecord::LocationType::Assign)
    return false;

  // Does this DbgVariableRecord refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
                                     DVR.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the
      // best opportunity to be accurately lowered. This update may change the
      // type of pointer being referred to; however this makes no difference
      // to debugging information, and we can't generate bitcasts that may
      // affect codegen.
      DVR.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}
static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
  DVI->removeFromParent();
  if (isa<PHINode>(VI))
    DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
  else
    DVI->insertAfter(VI);
}

static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) {
  DVR->removeFromParent();
  BasicBlock *VIBB = VI->getParent();
  if (isa<PHINode>(VI))
    VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
  else
    VIBB->insertDbgRecordAfter(DVR, VI);
}
// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
    SmallVector<Instruction *, 4> VIs;
    for (Value *V : DbgItem->location_ops())
      if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
        VIs.push_back(VI);

    // This item may depend on multiple instructions, complicating any
    // potential sink. This block takes the defensive approach, opting to
    // "undef" the item if it has more than one instruction and any of them do
    // not dominate it.
    for (Instruction *VI : VIs) {
      if (VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, Position))
        continue;

      // If we depend on multiple instructions and any of them doesn't
      // dominate this DVI, we probably can't salvage it: moving it to
      // after any of the instructions could cause us to lose the others.
      if (VIs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Unable to find valid location for Debug Value, undefing:\n"
            << *DbgItem);
        DbgItem->setKillLocation();
        break;
      }

      LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
                        << *DbgItem << ' ' << *VI);
      DbgInserterHelper(DbgItem, VI);
      MadeChange = true;
      break;
    }
  };

  for (BasicBlock &BB : F) {
    for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
      // Process dbg.value intrinsics.
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
      if (DVI) {
        DbgProcessor(DVI, DVI);
        continue;
      }

      // If this isn't a dbg.value, process any DbgVariableRecord records
      // attached to this instruction.
      for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
               filterDbgVars(Insn.getDbgRecordRange()))) {
        if (DVR.Type != DbgVariableRecord::LocationType::Value)
          continue;
        DbgProcessor(&DVR, &Insn);
      }
    }
  }

  return MadeChange;
}
// Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
// probes can be chained dependencies of other regular DAG nodes and block DAG
// combine optimizations.
bool CodeGenPrepare::placePseudoProbes(Function &F) {
  bool MadeChange = false;
  for (auto &Block : F) {
    // Move the remaining probes to the beginning of the block.
    auto FirstInst = Block.getFirstInsertionPt();
    while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
      ++FirstInst;
    BasicBlock::iterator I(FirstInst);
    I++;
    while (I != Block.end()) {
      if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
        II->moveBefore(&*FirstInst);
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}
/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}
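// Worked example (hypothetical weights): NewTrue = 6e9 and NewFalse = 2e9 give
// NewMax = 6e9 and Scale = 6e9 / (2^32 - 1) + 1 = 2, so the weights become
// 3e9 and 1e9, both of which now fit into a uint32_t.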
/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, cond2
    //   br i1 %cond.or label %dest1, label %dest2"
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BBs can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());
    if (IsHugeFunc)
      FreshBBs.insert(TmpBB);

    // Update the original basic block by using the first condition directly
    // in the branch instruction and removing the no longer needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (NewBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for NewBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This
      // choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
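      // Worked example (hypothetical weights): with original weights A = 30
      // and B = 10, this scheme gives Br1 weights {30, 50} and TmpBB (Br2)
      // weights {30, 20}. Then TrueProb(BB1) = 3/8 and
      //   TrueProb(BB1) + FalseProb(BB1) * TrueProb(TmpBB)
      //     = 3/8 + 5/8 * 3/5 = 3/4,
      // which matches the original TrueProb of 30/40.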
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight,
                                                  hasBranchWeightOrigin(*Br1)));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This
      // choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(TrueWeight, FalseWeight));
      }
    }

    ModifiedDT = ModifyDT::ModifyBBDT;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();