//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CodeGenPrepare.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"
STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
                          cl::init(false),
                          cl::desc("Disable select to branch conversion."));

static cl::opt<bool>
    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
                      cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool>
    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
                        cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
             "profile, we cannot tell the function is cold for sure because "
             "it may be a function newly added without ever being sampled. "
             "With the flag enabled, compiler can put such profile unknown "
             "functions into a special section, so runtime system can choose "
             "to handle it in a different way than .text section, to save "
             "RAM for example. "));

static cl::opt<bool> BBSectionsGuidedSectionPrefix(
    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use the basic-block-sections profile to determine the text "
             "section prefix for hot functions. Functions with "
             "basic-block-sections profile will be placed in `.text.hot` "
             "regardless of their FDO profile info. Other functions won't be "
             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
             "profiles."));

static cl::opt<uint64_t> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool>
    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
                     cl::desc("Enable converting phi types in CodeGenPrepare"));

static cl::opt<unsigned>
    HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
                            cl::desc("Least number of basic blocks for a "
                                     "function to be considered huge."));

static cl::opt<unsigned>
    MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
                          cl::Hidden,
                          cl::desc("Max number of address users to look at"));

static cl::opt<bool>
    DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
                      cl::desc("Disable elimination of dead PHI nodes."));
namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

enum class ModifyDT {
  NotModifyDT, // Do not modify any dominator tree.
  ModifyBBDT,  // Modify the basic-block dominator tree.
  ModifyInstDT // Modify the instruction dominance within a basic block.
               // This usually means we move/delete/insert instructions
               // in a basic block, so we should re-iterate the instructions
               // in such a basic block.
};
using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = MapVector<Value *, SExts>;

class TypePromotionTransaction;
class CodeGenPrepare {
  friend class CodeGenPrepareLegacyPass;
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  LoopInfo *LI = nullptr;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI = nullptr;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the type of the related instruction before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// offset.
  MapVector<AssertingVH<Value>,
            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP base after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  CodeGenPrepare(const TargetMachine *TM) : TM(TM) {}

  /// If we encounter a huge function, we need to limit the build time.
  bool IsHugeFunc = false;

  /// FreshBBs is like a worklist: it collects the updated BBs which need
  /// to be optimized again.
  /// Note: to bound build time in this pass, when a BB is updated we need
  /// to insert it into FreshBBs for huge functions.
  SmallSet<BasicBlock *, 32> FreshBBs;
  void releaseMemory() {
    // Clear per function information.
    InsertedInsts.clear();
    PromotedInsts.clear();
  }

  bool run(Function &F, FunctionAnalysisManager &AM);
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }
  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }
  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
                          unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchType(SwitchInst *SI);
  bool optimizeSwitchPhiConstants(SwitchInst *SI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool fixupDbgVariableRecord(DbgVariableRecord &I);
  bool fixupDbgVariableRecordsOnInst(Instruction &I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
                                   CmpInst *Cmp, Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  void verifyBFIUpdates(Function &F);
  bool _run(Function &F);
};
class CodeGenPrepareLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepareLegacyPass() : FunctionPass(ID) {
    initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  }
};

} // end anonymous namespace

char CodeGenPrepareLegacyPass::ID = 0;
bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  CodeGenPrepare CGP(TM);
  CGP.DL = &F.getDataLayout();
  CGP.SubtargetInfo = TM->getSubtargetImpl(F);
  CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
  CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
  CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
  CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
  CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto BBSPRWP =
      getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;

  return CGP._run(F);
}
INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
  return new CodeGenPrepareLegacyPass();
}
PreservedAnalyses CodeGenPreparePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  CodeGenPrepare CGP(TM);

  bool Changed = CGP.run(F, AM);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<TargetLibraryAnalysis>();
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}
bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
  DL = &F.getDataLayout();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
  TTI = &AM.getResult<TargetIRAnalysis>(F);
  LI = &AM.getResult<LoopAnalysis>(F);
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  BBSectionsProfileReader =
      AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
  return _run(F);
}
bool CodeGenPrepare::_run(Function &F) {
  bool EverMadeChange = false;

  OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to
  // .text.hot if requested.
  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
      BBSectionsProfileReader->isFunctionHot(F.getName())) {
    F.setSectionPrefix("hot");
  } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overwrites profile count based hotness while profile
    // counts based hotness overwrite the cold attribute.
    // This is a conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }
  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }
  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |=
      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);

  // If we are optimizing a huge function, we need to consider the build time,
  // because the basic algorithm's complexity is near O(N!).
  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;

  // Transformations above may invalidate dominator tree and/or loop info.
  DT.reset();
  LI->releaseMemory();
  LI->analyze(getDT(F));
  bool MadeChange = true;
  bool FuncIterated = false;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();

    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      if (FuncIterated && !FreshBBs.contains(&BB))
        continue;

      ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
      bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);

      if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
        DT.reset();

      MadeChange |= Changed;
      if (IsHugeFunc) {
        // If the BB is updated, it may still have a chance to be optimized.
        // This usually happens at sink optimization.
        // For example:
        //
        //     %and = and i32 %a, 4
        //     %cmp = icmp eq i32 %and, 0
        //
        // If the %cmp sinks to another BB, the %and will then have a chance
        // to sink as well.
        if (Changed)
          FreshBBs.insert(&BB);
        else if (FuncIterated)
          FreshBBs.erase(&BB);
      } else {
        // For small/normal functions, we restart BB iteration if the dominator
        // tree of the Function was changed.
        if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
          break;
      }
    }
    // We have iterated over all BBs in the function (this only matters for
    // huge functions).
    FuncIterated = IsHugeFunc;
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F, DT.get());

    if (MadeChange && VerifyLoopInfo)
      LI->verify(getDT(F));

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();
  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }
  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);

  return EverMadeChange;
}
bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}
/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });

  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}
// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}
/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;

    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    // Make an effort to skip unreachable blocks.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
                                /* MemDep */ nullptr,
                                /* PredecessorWithTwoSuccessors */ false, DT);
      Preds.insert(SinglePred);

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(BB);
      }
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (const auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}
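
// For illustration, a hypothetical CFG this handles: if %bb1 ends in
// "br label %bb2" and %bb1 is %bb2's only predecessor, %bb2 is merged into
// %bb1 and deleted, removing the fall-through edge entirely.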
/// Find a destination block from BB if BB is mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}
/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F)) {
    // Delete phi nodes that could block deleting other empty blocks.
    if (!DisableDeletePHIs)
      MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
    Blocks.push_back(&Block);
  }

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (isa<CallBrInst>(Pred->getTerminator()) &&
        llvm::is_contained(successors(Pred), DestBB))
      return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
  // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where the
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
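  //
  // A worked example with made-up frequencies: if Freq(Pred) = 900 and
  // Freq(BB) = 300, then Freq(Pred) / Freq(BB) = 3 > 2, so skipping the
  // merge is cheaper (300 copies + 300 branches) than merging (900 copies)
  // and we keep BB; with Freq(Pred) = 400 the ratio is below 2 and we merge.
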
  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all BB's incoming values are the same as the value from Pred. In
  // this case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
  return !Limit || PredFreq <= *Limit;
}
/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}
/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void replaceAllUsesWith(Value *Old, Value *New,
                               SmallSet<BasicBlock *, 32> &FreshBBs,
                               bool IsHuge) {
  auto *OldI = dyn_cast<Instruction>(Old);
  if (OldI) {
    for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (IsHuge)
        FreshBBs.insert(User->getParent());
    }
  }
  Old->replaceAllUsesWith(New);
}
/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(DestBB);
      }
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
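
// A minimal sketch (hypothetical IR) of a block this eliminates:
//
//   bb:                                ; preds = %a, %b
//     %p = phi i32 [ 0, %a ], [ 1, %b ]
//     br label %dest
//
// The incoming values of %p are folded into the PHIs of %dest, every use of
// %bb is rewritten to %dest, and %bb is erased.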
// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls
  MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}
// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}
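
// For example (hypothetical IR), for "%d = getelementptr i32, ptr %b, i64 3"
// the helper stores {i64 3} into OffsetV and returns true; a non-constant
// index, or any constant index greater than 20, makes it return false.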
// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and affects it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure the relocation of derived pointer is defined after
  // relocation of base pointer. If we find a relocation corresponding to base
  // defined earlier than relocation of base then we move relocation of base
  // right before found relocation. We consider only relocation in the same
  // basic block as relocation of base. Relocations from other basic block will
  // be skipped by optimization and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case transform is only valid when base dominates derived
      // relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // it could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)*
    //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)*
    //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
    //
    // In this case, we can not find the bitcast any more. So we insert a new
    // bitcast no matter there is already one or not. In this way, we can
    // handle all cases, and the extra bitcast should be optimized away in
    // later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement =
        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
                          ArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}
// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions
  MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against
    // Item.second is the vector of Targets to replace
    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}
/// Sink the specified cast instruction into its user blocks.
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock *, CastInst *> InsertedCasts;

  bool MadeChange = false;
  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(TheUse);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // The first insertion point of a block containing an EH pad is after the
    // pad. If the pad is the user, we cannot sink the cast past the pad.
    if (User->isEHPad())
      continue;

    // If the block selected to receive the cast is an EH pad that does not
    // allow non-PHI instructions before the terminator, we can't sink the
    // cast.
    if (UserBB->getTerminator()->isEHPad())
      continue;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCast = cast<CastInst>(CI->clone());
      InsertedCast->insertBefore(*UserBB, InsertPt);
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
    MadeChange = true;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    salvageDebugInfo(*CI);
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
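
// A sketch (hypothetical IR) of the sinking performed above: a cast defined
// in %entry but used only in %then is cloned into %then, so no virtual
// register for it has to live across the edge:
//
//   entry:                                 entry:
//     %t = trunc i64 %x to i32               br i1 %c, label %then, label %end
//     br i1 %c, label %then, label %end -->
//   then:                                  then:
//     call void @use(i32 %t)                 %t1 = trunc i64 %x to i32
//                                            call void @use(i32 %t1)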
/// If the specified cast instruction is a noop copy (e.g. it's casting from
/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
/// reduce the number of virtual registers that must be created and coalesced.
///
/// Return true if any changes are made.
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
                                       const DataLayout &DL) {
  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
  // than sinking only nop casts, but is helpful on some platforms.
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
                                 ASC->getDestAddressSpace()))
      return false;
  }

  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT))
    return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}
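
// A hedged example: on a target whose type legalization promotes i16 to
// i32, a "trunc i32 %x to i16" yields SrcVT == DstVT == i32 after the
// promotion queries above, so the truncate counts as a noop copy and is
// sunk via SinkCast.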
// Match a simple increment by constant operation. Note that if a sub is
// matched, the step is negated (as if the step had been canonicalized to
// an add, even though we leave the instruction alone.)
static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
                           Constant *&Step) {
  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step)))))
    return true;
  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
                       m_Instruction(LHS), m_Constant(Step))))) {
    Step = ConstantExpr::getNeg(Step);
    return true;
  }
  return false;
}
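
// For illustration (hypothetical IR), both of these match with LHS = %iv:
//   %iv.next = add i32 %iv, 4   ; Step = i32 4
//   %iv.next = sub i32 %iv, 4   ; Step = i32 -4 (negated, as noted above)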
/// If given \p PN is an inductive variable with value IVInc coming from the
/// backedge, and on each iteration it gets increased by Step, return pair
/// <IVInc, Step>. Otherwise, return std::nullopt.
static std::optional<std::pair<Instruction *, Constant *>>
getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
  const Loop *L = LI->getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
    return std::nullopt;
  auto *IVInc =
      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
    return std::nullopt;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
    return std::make_pair(IVInc, Step);
  return std::nullopt;
}
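
// A minimal sketch (hypothetical IR) of a recurrence this recognizes:
//
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     ...
//     %iv.next = add i32 %iv, 1
//     br i1 %cond, label %loop, label %exit
//
// Here getIVIncrement(%iv, LI) returns the pair <%iv.next, i32 1>.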
static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;
  Instruction *LHS = nullptr;
  Constant *Step = nullptr;
  if (!matchIncrement(I, LHS, Step))
    return false;
  if (auto *PN = dyn_cast<PHINode>(LHS))
    if (auto IVInc = getIVIncrement(PN, LI))
      return IVInc->first == I;
  return false;
}
bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
                                                 Value *Arg0, Value *Arg1,
                                                 CmpInst *Cmp,
                                                 Intrinsic::ID IID) {
  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
    if (!isIVIncrement(BO, LI))
      return false;
    const Loop *L = LI->getLoopFor(BO->getParent());
    assert(L && "L should not be null after isIVIncrement()");
    // Do not risk moving the increment into a child loop.
    if (LI->getLoopFor(Cmp->getParent()) != L)
      return false;

    // Finally, we need to ensure that the insert point will dominate all
    // existing uses of the increment.

    auto &DT = getDT(*BO->getParent()->getParent());
    if (DT.dominates(Cmp->getParent(), BO->getParent()))
      // If we're moving up the dom tree, all uses are trivially dominated.
      // (This is the common case for code produced by LSR.)
      return true;

    // Otherwise, special case the single use in the phi recurrence.
    return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
  };
  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
    // We used to use a dominator tree here to allow multi-block optimization.
    // But that was problematic because:
    // 1. It could cause a perf regression by hoisting the math op into the
    //    critical path.
    // 2. It could cause a perf regression by creating a value that was live
    //    across multiple blocks and increasing register pressure.
    // 3. Use of a dominator tree could cause large compile-time regression.
    //    This is because we recompute the DT on every change in the main CGP
    //    run-loop. The recomputing is probably unnecessary in many cases, so if
    //    that was fixed, using a DT here would be ok.
    //
    // There is one important particular case we still want to handle: if BO is
    // the IV increment. Important properties that make it profitable:
    // - We can speculate IV increment anywhere in the loop (as long as the
    //   indvar Phi is its only user);
    // - Upon computing Cmp, we effectively compute something equivalent to the
    //   IV increment (despite it looking different in the IR). So moving it up
    //   to the cmp point does not really increase register pressure.
    return false;
  }

  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
  if (BO->getOpcode() == Instruction::Add &&
      IID == Intrinsic::usub_with_overflow) {
    assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
    Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
  }

  // Insert at the first instruction of the pair.
  Instruction *InsertPt = nullptr;
  for (Instruction &Iter : *Cmp->getParent()) {
    // If BO is an XOR, it is not guaranteed that it comes after both inputs to
    // the overflow intrinsic are defined.
    if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
      InsertPt = &Iter;
      break;
    }
  }
  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");

  IRBuilder<> Builder(InsertPt);
  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
  if (BO->getOpcode() != Instruction::Xor) {
    Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
    replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
  } else
    assert(BO->hasOneUse() &&
           "Patterns with XOr should use the BO only in the compare");
  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
  replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
  Cmp->eraseFromParent();
  BO->eraseFromParent();
  return true;
}
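// E.g. for IID == Intrinsic::uadd_with_overflow, the pair
//   %math = add i32 %a, %b
//   %ov   = icmp ugt i32 %a, %math
// is rewritten (at the position of whichever of the two comes first) into
//   %m    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %m, 0
//   %ov   = extractvalue { i32, i1 } %m, 1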
/// Match special-case patterns that check for unsigned add overflow.
static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
                                                   BinaryOperator *&Add) {
  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);

  // We are not expecting non-canonical/degenerate code. Just bail out.
  if (isa<Constant>(A))
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
    B = ConstantInt::get(B->getType(), 1);
  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
    B = ConstantInt::get(B->getType(), -1);
  else
    return false;

  // Check the users of the variable operand of the compare looking for an add
  // with the adjusted constant.
  for (User *U : A->users()) {
    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
      Add = cast<BinaryOperator>(U);
      return true;
    }
  }
  return false;
}
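// E.g. '%add = add i32 %a, 1' paired with '%ov = icmp eq i32 %a, -1' is an
// overflow check: the add wraps exactly when %a is the maximum value, so B
// is adjusted to 1 and the matching add is found among %a's users.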
/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  bool EdgeCase = false;
  Value *A, *B;
  BinaryOperator *Add;
  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
      return false;
    // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
    A = Add->getOperand(0);
    B = Add->getOperand(1);
    EdgeCase = true;
  }

  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
                                 TLI->getValueType(*DL, Add->getType()),
                                 Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
    return false;

  // We don't want to move around uses of condition values this late, so we
  // check if it is legal to create the call to the intrinsic in the basic
  // block containing the icmp.
  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
    return false;

  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
                                   Intrinsic::uadd_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
                                               ModifyDT &ModifiedDT) {
  // We are not expecting non-canonical/degenerate code. Just bail out.
  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
  if (isa<Constant>(A) && isa<Constant>(B))
    return false;

  // Convert (A u> B) to (A u< B) to simplify pattern matching.
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred == ICmpInst::ICMP_UGT) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A == 0) is the same as (A u< 1).
  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
    B = ConstantInt::get(B->getType(), 1);
    Pred = ICmpInst::ICMP_ULT;
  }
  // Convert special-case: (A != 0) is the same as (0 u< A).
  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
    std::swap(A, B);
    Pred = ICmpInst::ICMP_ULT;
  }
  if (Pred != ICmpInst::ICMP_ULT)
    return false;

  // Walk the users of a variable operand of a compare looking for a subtract or
  // add with that same operand. Also match the 2nd operand of the compare to
  // the add/sub, but that may be a negated constant operand of an add.
  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
  BinaryOperator *Sub = nullptr;
  for (User *U : CmpVariableOperand->users()) {
    // A - B, A u< B --> usubo(A, B)
    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
      Sub = cast<BinaryOperator>(U);
      break;
    }

    // A + (-C), A u< C (canonicalized form of (sub A, C))
    const APInt *CmpC, *AddC;
    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
      Sub = cast<BinaryOperator>(U);
      break;
    }
  }
  if (!Sub)
    return false;

  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
                                 TLI->getValueType(*DL, Sub->getType()),
                                 Sub->hasNUsesOrMore(1)))
    return false;

  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
                                   Cmp, Intrinsic::usub_with_overflow))
    return false;

  // Reset callers - do not crash by iterating over a dead instruction.
  ModifiedDT = ModifyDT::ModifyInstDT;
  return true;
}
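// E.g. the borrow pattern
//   %sub = sub i32 %a, %b
//   %cmp = icmp ult i32 %a, %b
// becomes, via replaceMathCmpWithIntrinsic,
//   %u = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
// with the subtract and the compare replaced by its two extracted results.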
/// Sink the given CmpInst into user blocks to reduce the number of virtual
/// registers that must be created and coalesced. This is a clear win except on
/// targets with multiple condition code registers (PowerPC), where it might
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
  if (TLI.hasMultipleConditionRegisters())
    return false;

  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
    return false;

  // Only insert a cmp in each block once.
  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;

  bool MadeChange = false;
  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();
    BasicBlock *DefBB = Cmp->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB)
      continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
                                    Cmp->getOperand(0), Cmp->getOperand(1), "");
      InsertedCmp->insertBefore(*UserBB, InsertPt);
      // Propagate the debug info.
      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    MadeChange = true;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (Cmp->use_empty()) {
    Cmp->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
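// E.g. a compare defined in %entry but used only in %bb1 and %bb2 is
// duplicated into each of those blocks (and the original deleted), so no
// cross-block virtual register is needed to carry the i1 result.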
/// For pattern like:
///
///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
///   ...
/// DomBB:
///   ...
///   br DomCond, TrueBB, CmpBB
/// CmpBB: (with DomBB being the single predecessor)
///   ...
///   Cmp = icmp eq CmpOp0, CmpOp1
///   ...
///
/// It would take two comparisons on targets where the lowering of icmp sgt/slt
/// differs from the lowering of icmp eq (PowerPC). This function tries to
/// convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0,
/// CmpOp1'. After that, DomCond and Cmp can use the same comparison, saving
/// one comparison.
///
/// Return true if any changes are made.
static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
                                       const TargetLowering &TLI) {
  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
    return false;

  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  // If icmp eq has users other than BranchInst and SelectInst, converting it to
  // icmp slt/sgt would introduce more redundant LLVM IR.
  for (User *U : Cmp->users()) {
    if (isa<BranchInst>(U))
      continue;
    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
      continue;
    return false;
  }

  // This is a cheap/incomplete check for dominance - just match a single
  // predecessor with a conditional branch.
  BasicBlock *CmpBB = Cmp->getParent();
  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
  if (!DomBB)
    return false;

  // We want to ensure that the only way control gets to the comparison of
  // interest is that a less/greater than comparison on the same operands is
  // encountered.
  Value *DomCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
    return false;
  if (CmpBB != FalseBB)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
  ICmpInst::Predicate DomPred;
  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
    return false;
  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
    return false;

  // Convert the equality comparison to the opposite of the dominating
  // comparison and swap the direction for all branch/select users.
  // We have conceptually converted:
  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
  // to
  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
  // And similarly for branches.
  for (User *U : Cmp->users()) {
    if (auto *BI = dyn_cast<BranchInst>(U)) {
      assert(BI->isConditional() && "Must be conditional");
      BI->swapSuccessors();
      continue;
    }
    if (auto *SI = dyn_cast<SelectInst>(U)) {
      // Swap operands
      SI->swapValues();
      SI->swapProfMetadata();
      continue;
    }
    llvm_unreachable("Must be a branch or a select");
  }
  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
  return true;
}
/// Many architectures use the same instruction for both subtract and cmp. Try
/// to swap cmp operands to match subtract operations to allow for CSE.
static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
  Value *Op0 = Cmp->getOperand(0);
  Value *Op1 = Cmp->getOperand(1);
  if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
      isa<Constant>(Op1) || Op0 == Op1)
    return false;

  // If a subtract already has the same operands as a compare, swapping would be
  // bad. If a subtract has the same operands as a compare but in reverse order,
  // then swapping is good.
  int GoodToSwap = 0;
  unsigned NumInspected = 0;
  for (const User *U : Op0->users()) {
    // Avoid walking many users.
    if (++NumInspected > 128)
      return false;
    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
      GoodToSwap++;
    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
      GoodToSwap--;
  }

  if (GoodToSwap > 0) {
    Cmp->swapOperands();
    return true;
  }
  return false;
}
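// E.g. if '%d = sub i32 %b, %a' exists, rewriting 'icmp eq i32 %a, %b' as
// 'icmp eq i32 %b, %a' lets a target that compares by subtracting reuse %d's
// subtraction for the compare instead of emitting a second one.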
static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
                                  const DataLayout &DL) {
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
  if (!FCmp)
    return false;

  // Don't fold if the target offers free fabs and the predicate is legal.
  EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
  if (TLI.isFAbsFree(VT) &&
      TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
                          VT.getSimpleVT()))
    return false;

  // Reverse the canonicalization if it is a FP class test
  auto ShouldReverseTransform = [](FPClassTest ClassTest) {
    return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
  };
  auto [ClassVal, ClassTest] =
      fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
                      FCmp->getOperand(0), FCmp->getOperand(1));
  if (!ClassVal)
    return false;

  if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
    return false;

  IRBuilder<> Builder(Cmp);
  Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
  Cmp->replaceAllUsesWith(IsFPClass);
  RecursivelyDeleteTriviallyDeadInstructions(Cmp);
  return true;
}
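// E.g. an infinity test written through fabs, such as
//   %cmp = fcmp oeq double %fabs.x, 0x7FF0000000000000
// classifies as fcInf and is rewritten to
//   %cmp = call i1 @llvm.is.fpclass.f64(double %x, i32 516)
// where 516 is fcInf (fcNegInf | fcPosInf).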
bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
  if (sinkCmpExpression(Cmp, *TLI))
    return true;

  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
    return true;

  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
    return true;

  if (foldICmpWithDominatingICmp(Cmp, *TLI))
    return true;

  if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
    return true;

  if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
    return true;

  return false;
}
/// Duplicate and sink the given 'and' instruction into user blocks where it is
/// used in a compare to allow isel to generate better code for targets where
/// this operation can be combined.
///
/// Return true if any changes are made.
static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
                                  SetOfInstrs &InsertedInsts) {
  // Double-check that we're not trying to optimize an instruction that was
  // already optimized by some other part of this pass.
  assert(!InsertedInsts.count(AndI) &&
         "Attempting to optimize already optimized and instruction");
  (void)InsertedInsts;

  // Nothing to do for single use in same basic block.
  if (AndI->hasOneUse() &&
      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
    return false;

  // Try to avoid cases where sinking/duplicating is likely to increase register
  // pressure.
  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
      !isa<ConstantInt>(AndI->getOperand(1)) &&
      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
    return false;

  for (auto *U : AndI->users()) {
    Instruction *User = cast<Instruction>(U);

    // Only sink 'and' feeding icmp with 0.
    if (!isa<ICmpInst>(User))
      return false;

    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
    if (!CmpC || !CmpC->isZero())
      return false;
  }

  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
    return false;

  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
  LLVM_DEBUG(AndI->getParent()->dump());

  // Push the 'and' into the same block as the icmp 0. There should only be
  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
  // others, so we don't need to keep track of which BBs we insert into.
  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");

    // Keep the 'and' in the same place if the use is already in the same block.
    Instruction *InsertPt =
        User->getParent() == AndI->getParent() ? AndI : User;
    Instruction *InsertedAnd = BinaryOperator::Create(
        Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
        InsertPt->getIterator());
    // Propagate the debug info.
    InsertedAnd->setDebugLoc(AndI->getDebugLoc());

    // Replace a use of the 'and' with a use of the new 'and'.
    TheUse = InsertedAnd;
    ++NumAndUses;
    LLVM_DEBUG(User->getParent()->dump());
  }

  // We removed all uses, nuke the and.
  AndI->eraseFromParent();
  return true;
}
/// Check if the candidates could be combined with a shift instruction, which
/// includes:
/// 1. Truncate instruction
/// 2. And instruction and the imm is a mask of the low bits:
///    imm & (imm+1) == 0
static bool isExtractBitsCandidateUse(Instruction *User) {
  if (!isa<TruncInst>(User)) {
    if (User->getOpcode() != Instruction::And ||
        !isa<ConstantInt>(User->getOperand(1)))
      return false;

    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();

    if ((Cimm & (Cimm + 1)).getBoolValue())
      return false;
  }
  return true;
}
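// E.g. 'and i32 %x, 15' qualifies (0xF & 0x10 == 0), while 'and i32 %x, 12'
// does not (0xC & 0xD != 0): only a mask covering the low bits lets the
// shift-and-mask pair become a bit-field extract.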
/// Sink both shift and truncate instruction to the use of truncate's BB.
static bool
SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
                     const TargetLowering &TLI, const DataLayout &DL) {
  BasicBlock *UserBB = User->getParent();
  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
  auto *TruncI = cast<TruncInst>(User);
  bool MadeChange = false;

  for (Value::user_iterator TruncUI = TruncI->user_begin(),
                            TruncE = TruncI->user_end();
       TruncUI != TruncE;) {

    Use &TruncTheUse = TruncUI.getUse();
    Instruction *TruncUser = cast<Instruction>(*TruncUI);
    // Preincrement use iterator so we don't invalidate it.

    ++TruncUI;

    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
    if (!ISDOpcode)
      continue;

    // If the use is actually a legal node, there will not be an
    // implicit truncate.
    // FIXME: always querying the result type is just an
    // approximation; some nodes' legality is determined by the
    // operand or other means. There's no good way to find out though.
    if (TLI.isOperationLegalOrCustom(
            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
      continue;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(TruncUser))
      continue;

    BasicBlock *TruncUserBB = TruncUser->getParent();

    if (UserBB == TruncUserBB)
      continue;

    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];

    if (!InsertedShift && !InsertedTrunc) {
      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
      assert(InsertPt != TruncUserBB->end());
      // Sink the shift
      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
      InsertedShift->insertBefore(*TruncUserBB, InsertPt);

      // Sink the trunc
      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
      // It will go ahead of any debug-info.
      TruncInsertPt.setHeadBit(true);
      assert(TruncInsertPt != TruncUserBB->end());

      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
                                       TruncI->getType(), "");
      InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
      InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());

      MadeChange = true;

      TruncTheUse = InsertedTrunc;
    }
  }
  return MadeChange;
}
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate BitExtract
/// instruction. It will only be applied if the architecture supports BitExtract
/// instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
///
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
                                const TargetLowering &TLI,
                                const DataLayout &DL) {
  BasicBlock *DefBB = ShiftI->getParent();

  /// Only insert instructions in each block once.
  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;

  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));

  bool MadeChange = false;
  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
       UI != E;) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);
    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    if (!isExtractBitsCandidateUse(User))
      continue;

    BasicBlock *UserBB = User->getParent();

    if (UserBB == DefBB) {
      // If the shift and truncate instructions are in the same BB, the use of
      // the truncate (TruncUse) may still introduce another truncate if it is
      // not legal. In this case, we would like to sink both shift and truncate
      // instruction to the BB of TruncUse.
      // for example:
      // BB1:
      // i64 shift.result = lshr i64 opnd, imm
      // trunc.result = trunc shift.result to i16
      //
      // BB2:
      //   ----> We will have an implicit truncate here if the architecture does
      //   not have i16 compare.
      // cmp i16 trunc.result, opnd2
      //
      if (isa<TruncInst>(User) &&
          shiftIsLegal
          // If the type of the truncate is legal, no truncate will be
          // introduced in other basic blocks.
          && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
        MadeChange =
            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);

      continue;
    }
    // If we have already inserted a shift into this block, use it.
    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];

    if (!InsertedShift) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());

      if (ShiftI->getOpcode() == Instruction::AShr)
        InsertedShift =
            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
      else
        InsertedShift =
            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
      InsertedShift->insertBefore(*UserBB, InsertPt);
      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());

      MadeChange = true;
    }

    // Replace a use of the shift with a use of the new shift.
    TheUse = InsertedShift;
  }

  // If we removed all uses, or there are none, nuke the shift.
  if (ShiftI->use_empty()) {
    salvageDebugInfo(*ShiftI);
    ShiftI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// If counting leading or trailing zeros is an expensive operation and a zero
/// input is defined, add a check for zero to avoid calling the intrinsic.
///
/// We want to transform:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
///
/// into:
///   entry:
///     %cmpz = icmp eq i64 %A, 0
///     br i1 %cmpz, label %cond.end, label %cond.false
///   cond.false:
///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///     br label %cond.end
///   cond.end:
///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                  LoopInfo &LI,
                                  const TargetLowering *TLI,
                                  const DataLayout *DL, ModifyDT &ModifiedDT,
                                  SmallSet<BasicBlock *, 32> &FreshBBs,
                                  bool IsHugeFunc) {
  // If a zero input is undefined, it doesn't make sense to despeculate that.
  if (match(CountZeros->getOperand(1), m_One()))
    return false;

  // If it's cheap to speculate, there's nothing to do.
  Type *Ty = CountZeros->getType();
  auto IntrinsicID = CountZeros->getIntrinsicID();
  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
    return false;

  // Only handle legal scalar cases. Anything else requires too much work.
  unsigned SizeInBits = Ty->getScalarSizeInBits();
  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
    return false;

  // Bail if the value is never zero.
  Use &Op = CountZeros->getOperandUse(0);
  if (isKnownNonZero(Op, *DL))
    return false;

  // The intrinsic will be sunk behind a compare against zero and branch.
  BasicBlock *StartBlock = CountZeros->getParent();
  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
  if (IsHugeFunc)
    FreshBBs.insert(CallBlock);

  // Create another block after the count zero intrinsic. A PHI will be added
  // in this block to select the result of the intrinsic or the bit-width
  // constant if the input to the intrinsic is zero.
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
  // Any debug-info after CountZeros should not be included.
  SplitPt.setHeadBit(true);
  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
  if (IsHugeFunc)
    FreshBBs.insert(EndBlock);

  // Update the LoopInfo. The new blocks are in the same loop as the start
  // block.
  if (Loop *L = LI.getLoopFor(StartBlock)) {
    L->addBasicBlockToLoop(CallBlock, LI);
    L->addBasicBlockToLoop(EndBlock, LI);
  }

  // Set up a builder to create a compare, conditional branch, and PHI.
  IRBuilder<> Builder(CountZeros->getContext());
  Builder.SetInsertPoint(StartBlock->getTerminator());
  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());

  // Replace the unconditional branch that was created by the first split with
  // a compare against zero and a conditional branch.
  Value *Zero = Constant::getNullValue(Ty);
  // Avoid introducing branch on poison. This also replaces the ctz operand.
  if (!isGuaranteedNotToBeUndefOrPoison(Op))
    Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
  StartBlock->getTerminator()->eraseFromParent();

  // Create a PHI in the end block to select either the output of the intrinsic
  // or the bit width of the operand.
  Builder.SetInsertPoint(EndBlock, EndBlock->begin());
  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
  replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
  PN->addIncoming(BitWidth, StartBlock);
  PN->addIncoming(CountZeros, CallBlock);

  // We are explicitly handling the zero case, so we can set the intrinsic's
  // undefined zero argument to 'true'. This will also prevent reprocessing the
  // intrinsic; we only despeculate when a zero input is defined.
  CountZeros->setArgOperand(1, Builder.getTrue());
  ModifiedDT = ModifyDT::ModifyBBDT;
  return true;
}
bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (CI->isInlineAsm()) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (optimizeInlineAsmInst(CI))
      return true;
  }

  // Align the pointer arguments to this call if the target thinks it's a good
  // idea.
  unsigned MinSize;
  Align PrefAlign;
  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
    for (auto &Arg : CI->args()) {
      // We want to align both objects whose address is used directly and
      // objects whose address is used in casts and GEPs, though it only makes
      // sense for GEPs if the offset is a multiple of the desired alignment and
      // if size - offset meets the size threshold.
      if (!Arg->getType()->isPointerTy())
        continue;
      APInt Offset(DL->getIndexSizeInBits(
                       cast<PointerType>(Arg->getType())->getAddressSpace()),
                   0);
      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
      uint64_t Offset2 = Offset.getLimitedValue();
      if (!isAligned(PrefAlign, Offset2))
        continue;
      AllocaInst *AI;
      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
        AI->setAlignment(PrefAlign);
      // Global variables can only be aligned if they are defined in this
      // object (i.e. they are uniquely initialized in this object), and
      // over-aligning global variables that have an explicit section is
      // forbidden.
      GlobalVariable *GV;
      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
        GV->setAlignment(PrefAlign);
    }
  }
  // If this is a memcpy (or similar) then we may be able to improve the
  // alignment.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
    MaybeAlign MIDestAlign = MI->getDestAlign();
    if (!MIDestAlign || DestAlign > *MIDestAlign)
      MI->setDestAlignment(DestAlign);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      MaybeAlign MTISrcAlign = MTI->getSourceAlign();
      Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
      if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
        MTI->setSourceAlignment(SrcAlign);
    }
  }

  // If we have a cold call site, try to sink addressing computation into the
  // cold block. This interacts with our handling for loads and stores to
  // ensure that we can fold all uses of a potential addressing computation
  // into their uses. TODO: generalize this to work over profiling data
  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
      !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
    for (auto &Arg : CI->args()) {
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned AS = Arg->getType()->getPointerAddressSpace();
      if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
        return true;
    }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
      llvm_unreachable("llvm.assume should have been removed already");
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::experimental_widenable_condition: {
      // Give up on future widening opportunities so that we can fold away dead
      // paths and merge blocks before going into block-local instruction
      // selection.
      if (II->use_empty()) {
        II->eraseFromParent();
        return true;
      }
      Constant *RetVal = ConstantInt::getTrue(II->getContext());
      resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
      });
      return true;
    }
    case Intrinsic::objectsize:
      llvm_unreachable("llvm.objectsize.* should have been lowered already");
    case Intrinsic::is_constant:
      llvm_unreachable("llvm.is.constant.* should have been lowered already");
    case Intrinsic::aarch64_stlxr:
    case Intrinsic::aarch64_stxr: {
      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
      if (!ExtVal || !ExtVal->hasOneUse() ||
          ExtVal->getParent() == CI->getParent())
        return false;
      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
      ExtVal->moveBefore(CI);
      // Mark this instruction as "inserted by CGP", so that other
      // optimizations don't touch it.
      InsertedInsts.insert(ExtVal);
      return true;
    }

    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group: {
      Value *ArgVal = II->getArgOperand(0);
      auto it = LargeOffsetGEPMap.find(II);
      if (it != LargeOffsetGEPMap.end()) {
        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
        // Make sure not to have to deal with iterator invalidation
        // after possibly adding ArgVal to LargeOffsetGEPMap.
        auto GEPs = std::move(it->second);
        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
        LargeOffsetGEPMap.erase(II);
      }

      replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
      II->eraseFromParent();
      return true;
    }
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      // If counting zeros is expensive, try to avoid it.
      return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
                                   IsHugeFunc);
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      return optimizeFunnelShift(II);
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_value:
      return fixupDbgValue(II);
    case Intrinsic::masked_gather:
      return optimizeGatherScatterInst(II, II->getArgOperand(0));
    case Intrinsic::masked_scatter:
      return optimizeGatherScatterInst(II, II->getArgOperand(1));
    }

    SmallVector<Value *, 2> PtrOps;
    Type *AccessTy;
    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty()) {
        Value *PtrVal = PtrOps.pop_back_val();
        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
          return true;
      }
  }

  // From here on out we're working with named functions.
  if (!CI->getCalledFunction())
    return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // to fortified library functions (e.g. __memcpy_chk) that have the default
  // "don't know" as the objectsize. Anything else should be left alone.
  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
  IRBuilder<> Builder(CI);
  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
    replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
    CI->eraseFromParent();
    return true;
  }

  return false;
}
static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
                                          const CallInst *CI) {
  assert(CI && CI->use_empty());

  if (const auto *II = dyn_cast<IntrinsicInst>(CI))
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      return true;
    default:
      return false;
    }

  LibFunc LF;
  Function *Callee = CI->getCalledFunction();
  if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
    switch (LF) {
    case LibFunc_strcpy:
    case LibFunc_strncpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      return true;
    default:
      return false;
    }

  return false;
}
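// The listed routines return their destination argument (the memory
// intrinsics lower to libcalls that do), e.g. memset returns its dest
// pointer, so a 'ret' of that argument can still be duplicated as a tail
// call even though the call's own result is unused in the IR.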
/// Look for opportunities to duplicate return instructions to the predecessor
/// to enable tail call optimizations. The case it is currently looking for is
/// the following one. Known intrinsics or library functions that may be tail
/// called are taken into account as well.
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
                                                ModifyDT &ModifiedDT) {
  if (!BB->getTerminator())
    return false;

  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RetI)
    return false;

  assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");

  PHINode *PN = nullptr;
  ExtractValueInst *EVI = nullptr;
  BitCastInst *BCI = nullptr;
  Value *V = RetI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    EVI = dyn_cast<ExtractValueInst>(V);
    if (EVI) {
      V = EVI->getOperand(0);
      if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
        return false;
    }

    PN = dyn_cast<PHINode>(V);
  }

  if (PN && PN->getParent() != BB)
    return false;

  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
    const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
    if (BC && BC->hasOneUse())
      Inst = BC->user_back();

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      return II->getIntrinsicID() == Intrinsic::lifetime_end;
    return false;
  };

  // Make sure there are no instructions between the first instruction
  // and return.
  const Instruction *BI = BB->getFirstNonPHI();
  // Skip over debug and the bitcast.
  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
         isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
    BI = BI->getNextNode();
  if (BI != RetI)
    return false;

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  const Function *F = BB->getParent();
  SmallVector<BasicBlock *, 4> TailCallBBs;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      // Look through bitcasts.
      Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
      CallInst *CI = dyn_cast<CallInst>(IncomingVal);
      BasicBlock *PredBB = PN->getIncomingBlock(I);
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
          TLI->mayBeEmittedAsTailCall(CI) &&
          attributesPermitTailCall(F, CI, RetI, *TLI)) {
        TailCallBBs.push_back(PredBB);
      } else {
        // Consider the cases in which the phi value is indirectly produced by
        // the tail call, for example when encountering memset(), memmove(),
        // strcpy(), whose return value may have been optimized out. In such
        // cases, the value needs to be the first function argument.
        //
        // bb0:
        //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
        //   br label %return
        // return:
        //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
        if (PredBB && PredBB->getSingleSuccessor() == BB)
          CI = dyn_cast_or_null<CallInst>(
              PredBB->getTerminator()->getPrevNonDebugInstruction(true));

        if (CI && CI->use_empty() &&
            isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
            IncomingVal == CI->getArgOperand(0) &&
            TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI))
          TailCallBBs.push_back(PredBB);
      }
    }
  } else {
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
    for (BasicBlock *Pred : predecessors(BB)) {
      if (!VisitedBBs.insert(Pred).second)
        continue;
      if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
        CallInst *CI = dyn_cast<CallInst>(I);
        if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
            attributesPermitTailCall(F, CI, RetI, *TLI)) {
          // Either we return void or the return value must be the first
          // argument of a known intrinsic or library function.
          if (!V || isa<UndefValue>(V) ||
              (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
               V == CI->getArgOperand(0))) {
            TailCallBBs.push_back(Pred);
          }
        }
      }
    }
  }

  bool Changed = false;
  for (auto const &TailCallBB : TailCallBBs) {
    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into TailCallBB.
    (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
    assert(!VerifyBFIUpdates ||
           BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
    BFI->setBlockFreq(BB,
                      (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
    ModifiedDT = ModifyDT::ModifyBBDT;
    Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
    BB->eraseFromParent();

  return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
namespace {

/// This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg = nullptr;
  Value *ScaledReg = nullptr;
  Value *OriginalValue = nullptr;
  bool InBounds = true;

  enum FieldName {
    NoField = 0x00,
    BaseRegField = 0x01,
    BaseGVField = 0x02,
    BaseOffsField = 0x04,
    ScaledRegField = 0x08,
    ScaleField = 0x10,
    MultipleFields = 0xff
  };

  ExtAddrMode() = default;

  void print(raw_ostream &OS) const;
  void dump() const;

  FieldName compare(const ExtAddrMode &other) {
    // First check that the types are the same on each field, as differing types
    // is something we can't cope with later on.
    if (BaseReg && other.BaseReg &&
        BaseReg->getType() != other.BaseReg->getType())
      return MultipleFields;
    if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
      return MultipleFields;
    if (ScaledReg && other.ScaledReg &&
        ScaledReg->getType() != other.ScaledReg->getType())
      return MultipleFields;

    // Conservatively reject 'inbounds' mismatches.
    if (InBounds != other.InBounds)
      return MultipleFields;

    // Check each field to see if it differs.
    unsigned Result = NoField;
    if (BaseReg != other.BaseReg)
      Result |= BaseRegField;
    if (BaseGV != other.BaseGV)
      Result |= BaseGVField;
    if (BaseOffs != other.BaseOffs)
      Result |= BaseOffsField;
    if (ScaledReg != other.ScaledReg)
      Result |= ScaledRegField;
    // Don't count 0 as being a different scale, because that actually means
    // unscaled (which will already be counted by having no ScaledReg).
    if (Scale && other.Scale && Scale != other.Scale)
      Result |= ScaleField;

    if (llvm::popcount(Result) > 1)
      return MultipleFields;
    else
      return static_cast<FieldName>(Result);
  }

  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
  // pointer.
  bool isTrivial() {
    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
    // trivial if at most one of these terms is nonzero, except that BaseGV and
    // BaseReg both being zero actually means a null pointer value, which we
    // consider to be 'non-zero' here.
    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
  }

  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
    switch (Field) {
    default:
      return nullptr;
    case BaseRegField:
      return BaseReg;
    case BaseGVField:
      return BaseGV;
    case ScaledRegField:
      return ScaledReg;
    case BaseOffsField:
      return ConstantInt::get(IntPtrTy, BaseOffs);
    }
  }

  void SetCombinedField(FieldName Field, Value *V,
                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
    switch (Field) {
    default:
      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
      break;
    case ExtAddrMode::BaseRegField:
      BaseReg = V;
      break;
    case ExtAddrMode::BaseGVField:
      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
      // in the BaseReg field.
      assert(BaseReg == nullptr);
      BaseReg = V;
      BaseGV = nullptr;
      break;
    case ExtAddrMode::ScaledRegField:
      ScaledReg = V;
      // If we have a mix of scaled and unscaled addrmodes then we want scale
      // to be the scale and not zero.
      if (!Scale)
        for (const ExtAddrMode &AM : AddrModes)
          if (AM.Scale) {
            Scale = AM.Scale;
            break;
          }
      break;
    case ExtAddrMode::BaseOffsField:
      // The offset is no longer a constant, so it goes in ScaledReg with a
      // scale of 1.
      assert(ScaledReg == nullptr);
      ScaledReg = V;
      Scale = 1;
      BaseOffs = 0;
      break;
    }
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (InBounds)
    OS << "inbounds ";
  if (BaseGV) {
    OS << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "") << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "") << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

} // end anonymous namespace
2938 /// This class provides transaction based operation on the IR.
2939 /// Every change made through this class is recorded in the internal state and
2940 /// can be undone (rollback) until commit is called.
2941 /// CGP does not check if instructions could be speculatively executed when
2942 /// moved. Preserving the original location would pessimize the debugging
2943 /// experience, as well as negatively impact the quality of sample PGO.
2944 class TypePromotionTransaction
{
2945 /// This represents the common interface of the individual transaction.
2946 /// Each class implements the logic for doing one specific modification on
2947 /// the IR via the TypePromotionTransaction.
2948 class TypePromotionAction
{
2950 /// The Instruction modified.
2954 /// Constructor of the action.
2955 /// The constructor performs the related action on the IR.
2956 TypePromotionAction(Instruction
*Inst
) : Inst(Inst
) {}
2958 virtual ~TypePromotionAction() = default;
2960 /// Undo the modification done by this action.
2961 /// When this method is called, the IR must be in the same state as it was
2962 /// before this action was applied.
2963 /// \pre Undoing the action works if and only if the IR is in the exact same
2964 /// state as it was directly after this action was applied.
2965 virtual void undo() = 0;
2967 /// Advocate every change made by this action.
2968 /// When the results on the IR of the action are to be kept, it is important
2969 /// to call this function, otherwise hidden information may be kept forever.
2970 virtual void commit() {
2971 // Nothing to be done, this action is not doing anything.
2975 /// Utility to remember the position of an instruction.
2976 class InsertionHandler
{
2977 /// Position of an instruction.
2978 /// Either an instruction:
2979 /// - Is the first in a basic block: BB is used.
2980 /// - Has a previous instruction: PrevInst is used.
2982 Instruction
*PrevInst
;
2985 std::optional
<DbgRecord::self_iterator
> BeforeDbgRecord
= std::nullopt
;
2987 /// Remember whether or not the instruction had a previous instruction.
2988 bool HasPrevInstruction
;
2991 /// Record the position of \p Inst.
2992 InsertionHandler(Instruction
*Inst
) {
2993 HasPrevInstruction
= (Inst
!= &*(Inst
->getParent()->begin()));
2994 BasicBlock
*BB
= Inst
->getParent();
2996 // Record where we would have to re-insert the instruction in the sequence
2997 // of DbgRecords, if we ended up reinserting.
2998 if (BB
->IsNewDbgInfoFormat
)
2999 BeforeDbgRecord
= Inst
->getDbgReinsertionPosition();
3001 if (HasPrevInstruction
) {
3002 Point
.PrevInst
= &*std::prev(Inst
->getIterator());
3008 /// Insert \p Inst at the recorded position.
3009 void insert(Instruction
*Inst
) {
3010 if (HasPrevInstruction
) {
3011 if (Inst
->getParent())
3012 Inst
->removeFromParent();
3013 Inst
->insertAfter(&*Point
.PrevInst
);
3015 BasicBlock::iterator Position
= Point
.BB
->getFirstInsertionPt();
3016 if (Inst
->getParent())
3017 Inst
->moveBefore(*Point
.BB
, Position
);
3019 Inst
->insertBefore(*Point
.BB
, Position
);
3022 Inst
->getParent()->reinsertInstInDbgRecords(Inst
, BeforeDbgRecord
);
3026 /// Move an instruction before another.
3027 class InstructionMoveBefore
: public TypePromotionAction
{
3028 /// Original position of the instruction.
3029 InsertionHandler Position
;
3032 /// Move \p Inst before \p Before.
3033 InstructionMoveBefore(Instruction
*Inst
, Instruction
*Before
)
3034 : TypePromotionAction(Inst
), Position(Inst
) {
3035 LLVM_DEBUG(dbgs() << "Do: move: " << *Inst
<< "\nbefore: " << *Before
3037 Inst
->moveBefore(Before
);
3040 /// Move the instruction back to its original position.
3041 void undo() override
{
3042 LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst
<< "\n");
3043 Position
.insert(Inst
);
3047 /// Set the operand of an instruction with a new value.
3048 class OperandSetter
: public TypePromotionAction
{
3049 /// Original operand of the instruction.
3052 /// Index of the modified instruction.
3056 /// Set \p Idx operand of \p Inst with \p NewVal.
3057 OperandSetter(Instruction
*Inst
, unsigned Idx
, Value
*NewVal
)
3058 : TypePromotionAction(Inst
), Idx(Idx
) {
3059 LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx
<< "\n"
3060 << "for:" << *Inst
<< "\n"
3061 << "with:" << *NewVal
<< "\n");
3062 Origin
= Inst
->getOperand(Idx
);
3063 Inst
->setOperand(Idx
, NewVal
);
3066 /// Restore the original value of the instruction.
3067 void undo() override
{
3068 LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx
<< "\n"
3069 << "for: " << *Inst
<< "\n"
3070 << "with: " << *Origin
<< "\n");
3071 Inst
->setOperand(Idx
, Origin
);
3075 /// Hide the operands of an instruction.
3076 /// Do as if this instruction was not using any of its operands.
3077 class OperandsHider
: public TypePromotionAction
{
3078 /// The list of original operands.
3079 SmallVector
<Value
*, 4> OriginalValues
;
3082 /// Remove \p Inst from the uses of the operands of \p Inst.
3083 OperandsHider(Instruction
*Inst
) : TypePromotionAction(Inst
) {
3084 LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst
<< "\n");
3085 unsigned NumOpnds
= Inst
->getNumOperands();
3086 OriginalValues
.reserve(NumOpnds
);
3087 for (unsigned It
= 0; It
< NumOpnds
; ++It
) {
3088 // Save the current operand.
3089 Value
*Val
= Inst
->getOperand(It
);
3090 OriginalValues
.push_back(Val
);
3092 // We could use OperandSetter here, but that would imply an overhead
3093 // that we are not willing to pay.
3094 Inst
->setOperand(It
, UndefValue::get(Val
->getType()));
3098 /// Restore the original list of uses.
3099 void undo() override
{
3100 LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst
<< "\n");
3101 for (unsigned It
= 0, EndIt
= OriginalValues
.size(); It
!= EndIt
; ++It
)
3102 Inst
->setOperand(It
, OriginalValues
[It
]);
3106 /// Build a truncate instruction.
3107 class TruncBuilder
: public TypePromotionAction
{
3111 /// Build a truncate instruction of \p Opnd producing a \p Ty
3113 /// trunc Opnd to Ty.
3114 TruncBuilder(Instruction
*Opnd
, Type
*Ty
) : TypePromotionAction(Opnd
) {
3115 IRBuilder
<> Builder(Opnd
);
3116 Builder
.SetCurrentDebugLocation(DebugLoc());
3117 Val
= Builder
.CreateTrunc(Opnd
, Ty
, "promoted");
3118 LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val
<< "\n");
3121 /// Get the built value.
3122 Value
*getBuiltValue() { return Val
; }
3124 /// Remove the built instruction.
3125 void undo() override
{
3126 LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val
<< "\n");
3127 if (Instruction
*IVal
= dyn_cast
<Instruction
>(Val
))
3128 IVal
->eraseFromParent();
3132 /// Build a sign extension instruction.
3133 class SExtBuilder
: public TypePromotionAction
{
3137 /// Build a sign extension instruction of \p Opnd producing a \p Ty
3139 /// sext Opnd to Ty.
3140 SExtBuilder(Instruction
*InsertPt
, Value
*Opnd
, Type
*Ty
)
3141 : TypePromotionAction(InsertPt
) {
3142 IRBuilder
<> Builder(InsertPt
);
3143 Val
= Builder
.CreateSExt(Opnd
, Ty
, "promoted");
3144 LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val
<< "\n");
3147 /// Get the built value.
3148 Value
*getBuiltValue() { return Val
; }
3150 /// Remove the built instruction.
3151 void undo() override
{
3152 LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val
<< "\n");
3153 if (Instruction
*IVal
= dyn_cast
<Instruction
>(Val
))
3154 IVal
->eraseFromParent();
3158 /// Build a zero extension instruction.
3159 class ZExtBuilder
: public TypePromotionAction
{
3163 /// Build a zero extension instruction of \p Opnd producing a \p Ty
3165 /// zext Opnd to Ty.
3166 ZExtBuilder(Instruction
*InsertPt
, Value
*Opnd
, Type
*Ty
)
3167 : TypePromotionAction(InsertPt
) {
3168 IRBuilder
<> Builder(InsertPt
);
3169 Builder
.SetCurrentDebugLocation(DebugLoc());
3170 Val
= Builder
.CreateZExt(Opnd
, Ty
, "promoted");
3171 LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val
<< "\n");
3174 /// Get the built value.
3175 Value
*getBuiltValue() { return Val
; }
3177 /// Remove the built instruction.
3178 void undo() override
{
3179 LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val
<< "\n");
3180 if (Instruction
*IVal
= dyn_cast
<Instruction
>(Val
))
3181 IVal
->eraseFromParent();
3185 /// Mutate an instruction to another type.
3186 class TypeMutator
: public TypePromotionAction
{
3187 /// Record the original type.
3191 /// Mutate the type of \p Inst into \p NewTy.
3192 TypeMutator(Instruction
*Inst
, Type
*NewTy
)
3193 : TypePromotionAction(Inst
), OrigTy(Inst
->getType()) {
3194 LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst
<< " with " << *NewTy
3196 Inst
->mutateType(NewTy
);
3199 /// Mutate the instruction back to its original type.
3200 void undo() override
{
3201 LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst
<< " with " << *OrigTy
3203 Inst
->mutateType(OrigTy
);
3207 /// Replace the uses of an instruction by another instruction.
3208 class UsesReplacer
: public TypePromotionAction
{
3209 /// Helper structure to keep track of the replaced uses.
3210 struct InstructionAndIdx
{
3211 /// The instruction using the instruction.
3214 /// The index where this instruction is used for Inst.
3217 InstructionAndIdx(Instruction
*Inst
, unsigned Idx
)
3218 : Inst(Inst
), Idx(Idx
) {}
3221 /// Keep track of the original uses (pair Instruction, Index).
3222 SmallVector
<InstructionAndIdx
, 4> OriginalUses
;
3223 /// Keep track of the debug users.
3224 SmallVector
<DbgValueInst
*, 1> DbgValues
;
3225 /// And non-instruction debug-users too.
3226 SmallVector
<DbgVariableRecord
*, 1> DbgVariableRecords
;
3228 /// Keep track of the new value so that we can undo it by replacing
3229 /// instances of the new value with the original value.
3232 using use_iterator
= SmallVectorImpl
<InstructionAndIdx
>::iterator
;
3235 /// Replace all the use of \p Inst by \p New.
3236 UsesReplacer(Instruction
*Inst
, Value
*New
)
3237 : TypePromotionAction(Inst
), New(New
) {
3238 LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst
<< " with " << *New
3240 // Record the original uses.
3241 for (Use
&U
: Inst
->uses()) {
3242 Instruction
*UserI
= cast
<Instruction
>(U
.getUser());
3243 OriginalUses
.push_back(InstructionAndIdx(UserI
, U
.getOperandNo()));
3245 // Record the debug uses separately. They are not in the instruction's
3246 // use list, but they are replaced by RAUW.
3247 findDbgValues(DbgValues
, Inst
, &DbgVariableRecords
);
3249 // Now, we can replace the uses.
3250 Inst
->replaceAllUsesWith(New
);
3253 /// Reassign the original uses of Inst to Inst.
3254 void undo() override
{
3255 LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst
<< "\n");
3256 for (InstructionAndIdx
&Use
: OriginalUses
)
3257 Use
.Inst
->setOperand(Use
.Idx
, Inst
);
3258 // RAUW has replaced all original uses with references to the new value,
3259 // including the debug uses. Since we are undoing the replacements,
3260 // the original debug uses must also be reinstated to maintain the
3261 // correctness and utility of debug value instructions.
3262 for (auto *DVI
: DbgValues
)
3263 DVI
->replaceVariableLocationOp(New
, Inst
);
3264 // Similar story with DbgVariableRecords, the non-instruction
3265 // representation of dbg.values.
3266 for (DbgVariableRecord
*DVR
: DbgVariableRecords
)
3267 DVR
->replaceVariableLocationOp(New
, Inst
);
3271 /// Remove an instruction from the IR.
3272 class InstructionRemover
: public TypePromotionAction
{
3273 /// Original position of the instruction.
3274 InsertionHandler Inserter
;
3276 /// Helper structure to hide all the link to the instruction. In other
3277 /// words, this helps to do as if the instruction was removed.
3278 OperandsHider Hider
;
3280 /// Keep track of the uses replaced, if any.
3281 UsesReplacer
*Replacer
= nullptr;
3283 /// Keep track of instructions removed.
3284 SetOfInstrs
&RemovedInsts
;
3287 /// Remove all reference of \p Inst and optionally replace all its
3289 /// \p RemovedInsts Keep track of the instructions removed by this Action.
3290 /// \pre If !Inst->use_empty(), then New != nullptr
3291 InstructionRemover(Instruction
*Inst
, SetOfInstrs
&RemovedInsts
,
3292 Value
*New
= nullptr)
3293 : TypePromotionAction(Inst
), Inserter(Inst
), Hider(Inst
),
3294 RemovedInsts(RemovedInsts
) {
3296 Replacer
= new UsesReplacer(Inst
, New
);
3297 LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst
<< "\n");
3298 RemovedInsts
.insert(Inst
);
3299 /// The instructions removed here will be freed after completing
3300 /// optimizeBlock() for all blocks as we need to keep track of the
3301 /// removed instructions during promotion.
3302 Inst
->removeFromParent();
    ~InstructionRemover() override { delete Replacer; }

    InstructionRemover &operator=(const InstructionRemover &other) = delete;
    InstructionRemover(const InstructionRemover &other) = delete;
    /// Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when this action was built.
    void undo() override {
      LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
      RemovedInsts.erase(Inst);
    }
  };
public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  using ConstRestorationPt = const TypePromotionAction *;

  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
      : RemovedInsts(RemovedInsts) {}
  /// Commit every change made in this transaction. Return true if any change
  /// happened.
  bool commit();

  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);

  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;
  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);

  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);

  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);

  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);

  /// Same as IRBuilder::createTrunc.
  Value *createTrunc(Instruction *Opnd, Type *Ty);

  /// Same as IRBuilder::createSExt.
  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);

  /// Same as IRBuilder::createZExt.
  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// @}
private:
  /// The ordered list of actions made so far.
  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;

  using CommitPt =
      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;

  SetOfInstrs &RemovedInsts;
};

} // end anonymous namespace
void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
      Inst, Idx, NewVal));
}
void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::InstructionRemover>(
          Inst, RemovedInsts, NewVal));
}
void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
}
void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(
      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
}
Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}
Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}
Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
                                            Type *Ty) {
  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
  Value *Val = Ptr->getBuiltValue();
  Actions.push_back(std::move(Ptr));
  return Val;
}
TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return !Actions.empty() ? Actions.back().get() : nullptr;
}
bool TypePromotionTransaction::commit() {
  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
    Action->commit();
  bool Modified = !Actions.empty();
  Actions.clear();
  return Modified;
}
void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != Actions.back().get()) {
    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
    Curr->undo();
  }
}
namespace {

/// A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction *> &AddrModeInsts;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const DataLayout &DL;
  const LoopInfo &LI;
  const std::function<const DominatorTree &()> getDTFn;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  unsigned AddrSpace;
  Instruction *MemoryInst;

  /// This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedInsts;

  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;

  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  // A GEP whose offset is too large to be folded into the addressing mode.
  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;

  /// This is set to true when we should not do profitability checks.
  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
  bool IgnoreProfitability;

  /// True if we are optimizing for size.
  bool OptSize = false;

  ProfileSummaryInfo *PSI;
  BlockFrequencyInfo *BFI;
  AddressingModeMatcher(
      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
      const TargetRegisterInfo &TRI, const LoopInfo &LI,
      const std::function<const DominatorTree &()> getDTFn, Type *AT,
      unsigned AS, Instruction *MI, ExtAddrMode &AM,
      const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
      TypePromotionTransaction &TPT,
      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
        DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
        AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
        InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
        LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
    IgnoreProfitability = false;
  }
public:
  /// Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
  /// optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode
  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
        SmallVectorImpl<Instruction *> &AddrModeInsts,
        const TargetLowering &TLI, const LoopInfo &LI,
        const std::function<const DominatorTree &()> getDTFn,
        const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
        InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
                                         AccessTy, AS, MemoryInst, Result,
                                         InsertedInsts, PromotedInsts, TPT,
                                         LargeOffsetGEP, OptSize, PSI, BFI)
                       .matchAddr(V, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool matchAddr(Value *Addr, unsigned Depth);
  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = nullptr);
  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
                             Value *PromotedOperand) const;
};
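// Illustrative sketch (hypothetical caller; the pass's real call site carries
// considerably more setup, which is elided here): Match() is the sole public
// entry point and returns the maximal addressing mode folded around Addr.
//
//   SmallVector<Instruction *, 16> NewAddrModeInsts;
//   ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
//       Addr, AccessTy, AS, MemoryInst, NewAddrModeInsts, TLI, LI, GetDTFn,
//       TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize,
//       PSI, BFI);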
class PhiNodeSet;

/// An iterator for PhiNodeSet.
class PhiNodeSetIterator {
  PhiNodeSet *const Set;
  size_t CurrentIndex = 0;

public:
  /// The constructor. Start should point to either a valid element, or be
  /// equal to the size of the underlying SmallVector of the PhiNodeSet.
  PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
  PHINode *operator*() const;
  PhiNodeSetIterator &operator++();
  bool operator==(const PhiNodeSetIterator &RHS) const;
  bool operator!=(const PhiNodeSetIterator &RHS) const;
};
/// Keeps a set of PHINodes.
///
/// This is a minimal set implementation for a specific use case:
/// It is very fast when there are very few elements, but also provides good
/// performance when there are many. It is similar to SmallPtrSet, but also
/// provides iteration by insertion order, which is deterministic and stable
/// across runs. It is also similar to SmallSetVector, but provides removing
/// elements in O(1) time. This is achieved by not actually removing the
/// element from the underlying vector, so it comes at the cost of using more
/// memory, but that is fine, since PhiNodeSets are used as short lived
/// objects.
class PhiNodeSet {
  friend class PhiNodeSetIterator;

  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
  using iterator = PhiNodeSetIterator;

  /// Keeps the elements in the order of their insertion in the underlying
  /// vector. To achieve constant time removal, it never deletes any element.
  SmallVector<PHINode *, 32> NodeList;

  /// Keeps the elements in the underlying set implementation. This (and not
  /// the NodeList defined above) is the source of truth on whether an element
  /// is actually in the collection.
  MapType NodeMap;

  /// Points to the first valid (not deleted) element when the set is not
  /// empty and the value is not zero. Equals the size of the underlying
  /// vector when the set is empty. When the value is 0, as in the beginning,
  /// the first element may or may not be valid.
  size_t FirstValidElement = 0;

public:
  /// Inserts a new element to the collection.
  /// \returns true if the element is actually added, i.e. was not in the
  /// collection before the operation.
  bool insert(PHINode *Ptr) {
    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
      NodeList.push_back(Ptr);
      return true;
    }
    return false;
  }

  /// Removes the element from the collection.
  /// \returns whether the element is actually removed, i.e. was in the
  /// collection before the operation.
  bool erase(PHINode *Ptr) {
    if (NodeMap.erase(Ptr)) {
      SkipRemovedElements(FirstValidElement);
      return true;
    }
    return false;
  }

  /// Removes all elements and clears the collection.
  void clear() {
    NodeMap.clear();
    NodeList.clear();
    FirstValidElement = 0;
  }

  /// \returns an iterator that will iterate the elements in the order of
  /// insertion.
  iterator begin() {
    if (FirstValidElement == 0)
      SkipRemovedElements(FirstValidElement);
    return PhiNodeSetIterator(this, FirstValidElement);
  }

  /// \returns an iterator that points to the end of the collection.
  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }

  /// Returns the number of elements in the collection.
  size_t size() const { return NodeMap.size(); }

  /// \returns 1 if the given element is in the collection, and 0 otherwise.
  size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
private:
  /// Updates the CurrentIndex so that it will point to a valid element.
  ///
  /// If the element of NodeList at CurrentIndex is valid, it does not
  /// change it. If there are no more valid elements, it updates CurrentIndex
  /// to point to the end of the NodeList.
  void SkipRemovedElements(size_t &CurrentIndex) {
    while (CurrentIndex < NodeList.size()) {
      auto it = NodeMap.find(NodeList[CurrentIndex]);
      // If the element has been deleted and added again later, NodeMap will
      // point to a different index, so CurrentIndex will still be invalid.
      if (it != NodeMap.end() && it->second == CurrentIndex)
        break;
      ++CurrentIndex;
    }
  }
};
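// Illustrative usage (assuming PN1 and PN2 are live PHINode pointers): erase()
// is O(1) because the node is only dropped from NodeMap; iteration then skips
// the stale NodeList slot.
//
//   PhiNodeSet Set;
//   Set.insert(PN1);
//   Set.insert(PN2);
//   Set.erase(PN1);          // Constant time; NodeList keeps the stale slot.
//   for (PHINode *P : Set)   // Visits only PN2, in insertion order.
//     P->print(dbgs());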
PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
    : Set(Set), CurrentIndex(Start) {}

PHINode *PhiNodeSetIterator::operator*() const {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  return Set->NodeList[CurrentIndex];
}

PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
  assert(CurrentIndex < Set->NodeList.size() &&
         "PhiNodeSet access out of range");
  ++CurrentIndex;
  Set->SkipRemovedElements(CurrentIndex);
  return *this;
}

bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
  return CurrentIndex == RHS.CurrentIndex;
}

bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
  return !((*this) == RHS);
}
/// Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase a phi node from this set
/// if it is simplified.
class SimplificationTracker {
  DenseMap<Value *, Value *> Storage;
  const SimplifyQuery &SQ;
  // Tracks newly created Phi nodes. The elements are iterated by insertion
  // order.
  PhiNodeSet AllPhiNodes;
  // Tracks newly created Select nodes.
  SmallPtrSet<SelectInst *, 32> AllSelectNodes;

public:
  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
  Value *Get(Value *V) {
    do {
      auto SV = Storage.find(V);
      if (SV == Storage.end())
        return V;
      V = SV->second;
    } while (true);
  }
  Value *Simplify(Value *Val) {
    SmallVector<Value *, 32> WorkList;
    SmallPtrSet<Value *, 32> Visited;
    WorkList.push_back(Val);
    while (!WorkList.empty()) {
      auto *P = WorkList.pop_back_val();
      if (!Visited.insert(P).second)
        continue;
      if (auto *PI = dyn_cast<Instruction>(P))
        if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
          for (auto *U : PI->users())
            WorkList.push_back(cast<Value>(U));
          Put(PI, V);
          PI->replaceAllUsesWith(V);
          if (auto *PHI = dyn_cast<PHINode>(PI))
            AllPhiNodes.erase(PHI);
          if (auto *Select = dyn_cast<SelectInst>(PI))
            AllSelectNodes.erase(Select);
          PI->eraseFromParent();
        }
    }
    return Get(Val);
  }
  void Put(Value *From, Value *To) { Storage.insert({From, To}); }

  void ReplacePhi(PHINode *From, PHINode *To) {
    Value *OldReplacement = Get(From);
    while (OldReplacement != From) {
      From = To;
      To = dyn_cast<PHINode>(OldReplacement);
      OldReplacement = Get(From);
    }
    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
    Put(From, To);
    From->replaceAllUsesWith(To);
    AllPhiNodes.erase(From);
    From->eraseFromParent();
  }
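  // Illustrative note (not from the source): Get() chases Put() records
  // transitively, so after
  //
  //   ST.Put(A, B);
  //   ST.Put(B, C);
  //
  // ST.Get(A) returns C. ReplacePhi() above relies on this to locate the live
  // replacement of a node that has itself been replaced in the meantime.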
  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }

  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }

  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }

  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }

  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
  void destroyNewNodes(Type *CommonType) {
    // For safe erasing, replace the uses with dummy value first.
    auto *Dummy = PoisonValue::get(CommonType);
    for (auto *I : AllPhiNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllPhiNodes.clear();
    for (auto *I : AllSelectNodes) {
      I->replaceAllUsesWith(Dummy);
      I->eraseFromParent();
    }
    AllSelectNodes.clear();
  }
};
/// A helper class for combining addressing modes.
class AddressingModeCombiner {
  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
  typedef std::pair<PHINode *, PHINode *> PHIPair;

private:
  /// The addressing modes we've collected.
  SmallVector<ExtAddrMode, 16> AddrModes;

  /// The field in which the AddrModes differ, when we have more than one.
  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;

  /// Are the AddrModes that we have all just equal to their original values?
  bool AllAddrModesTrivial = true;

  /// Common Type for all different fields in addressing modes.
  Type *CommonType = nullptr;

  /// SimplifyQuery for simplifyInstruction utility.
  const SimplifyQuery &SQ;

  /// Original Address.
  Value *Original;

  /// Common value among addresses.
  Value *CommonValue = nullptr;

public:
  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
      : SQ(_SQ), Original(OriginalValue) {}

  ~AddressingModeCombiner() { eraseCommonValueIfDead(); }

  /// Get the combined AddrMode.
  const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
  /// Add a new AddrMode if it's compatible with the AddrModes we already
  /// have.
  /// \return True iff we succeeded in doing so.
  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
    // Take note of if we have any non-trivial AddrModes, as we need to detect
    // when all AddrModes are trivial as then we would introduce a phi or
    // select which just duplicates what's already there.
    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();

    // If this is the first addrmode then everything is fine.
    if (AddrModes.empty()) {
      AddrModes.emplace_back(NewAddrMode);
      return true;
    }

    // Figure out how different this is from the other address modes, which we
    // can do just by comparing against the first one given that we only care
    // about the cumulative difference.
    ExtAddrMode::FieldName ThisDifferentField =
        AddrModes[0].compare(NewAddrMode);
    if (DifferentField == ExtAddrMode::NoField)
      DifferentField = ThisDifferentField;
    else if (DifferentField != ThisDifferentField)
      DifferentField = ExtAddrMode::MultipleFields;

    // If NewAddrMode differs in more than one dimension we cannot handle it.
    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;

    // If the Scale field is different then we reject.
    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;

    // We also must reject the case when the base offset is different and the
    // scale reg is not null: we cannot handle it because the merge of the
    // different offsets would be used as the ScaleReg.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
                              !NewAddrMode.ScaledReg);

    // We also must reject the case when GV is different and BaseReg is
    // installed, because we want to use the base reg as a merge of the GV
    // values.
    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
                              !NewAddrMode.HasBaseReg);

    // Even if NewAddrMode is the same, we still need to collect it because
    // the original value is different. And later we will need all original
    // values as anchors when finding the common Phi node.
    if (CanHandle)
      AddrModes.emplace_back(NewAddrMode);
    else
      AddrModes.clear();

    return CanHandle;
  }
  /// Combine the addressing modes we've collected into a single
  /// addressing mode.
  /// \return True iff we successfully combined them or we only had one so
  /// didn't need to combine them anyway.
  bool combineAddrModes() {
    // If we have no AddrModes then they can't be combined.
    if (AddrModes.size() == 0)
      return false;

    // A single AddrMode can trivially be combined.
    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
      return true;

    // If the AddrModes we collected are all just equal to the value they are
    // derived from then combining them wouldn't do anything useful.
    if (AllAddrModesTrivial)
      return false;

    if (!addrModeCombiningAllowed())
      return false;

    // Build a map between <original value, basic block where we saw it> to
    // value of base register.
    // Bail out if there is no common type.
    FoldAddrToValueMapping Map;
    if (!initializeMap(Map))
      return false;

    CommonValue = findCommon(Map);
    if (CommonValue)
      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
    return CommonValue != nullptr;
  }
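  // Illustrative sketch (hypothetical caller, mirroring how the pass feeds
  // this class; `CandidateModes` is an assumed container): each candidate
  // mode is offered via addNewAddrMode(), and combineAddrModes() decides
  // whether a single merged mode can serve all of them.
  //
  //   AddressingModeCombiner AddrModes(SQ, Addr);
  //   for (ExtAddrMode &AM : CandidateModes)
  //     if (!AddrModes.addNewAddrMode(AM))
  //       break;                              // Incompatible; stop early.
  //   if (AddrModes.combineAddrModes()) {
  //     const ExtAddrMode &AM = AddrModes.getAddrMode();
  //     // ... fold AM into the memory instruction ...
  //   }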
private:
  /// `CommonValue` may be a placeholder inserted by us.
  /// If the placeholder is not used, we should remove this dead instruction.
  void eraseCommonValueIfDead() {
    if (CommonValue && CommonValue->getNumUses() == 0)
      if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
        CommonInst->eraseFromParent();
  }
  /// Initialize Map with anchor values. For each address seen,
  /// we set the value of the differing field observed in that address.
  /// At the same time we find a common type for the different fields, which
  /// we will use to create new Phi/Select nodes. Keep it in the CommonType
  /// field. Return false if no common type is found.
  bool initializeMap(FoldAddrToValueMapping &Map) {
    // Keep track of keys where the value is null. We will need to replace it
    // with constant null when we know the common type.
    SmallVector<Value *, 2> NullValue;
    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
    for (auto &AM : AddrModes) {
      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
      if (DV) {
        auto *Type = DV->getType();
        if (CommonType && CommonType != Type)
          return false;
        CommonType = Type;
        Map[AM.OriginalValue] = DV;
      } else {
        NullValue.push_back(AM.OriginalValue);
      }
    }
    assert(CommonType && "At least one non-null value must be!");
    for (auto *V : NullValue)
      Map[V] = Constant::getNullValue(CommonType);
    return true;
  }
  /// We have a mapping between value A and another value B where B was a
  /// field in the addressing mode represented by A. Also we have an original
  /// value C representing an address we start with. Traversing from C through
  /// phi nodes and selects we ended up with A's in a map. This utility
  /// function tries to find a value V which is a field in addressing mode C
  /// such that, traversing through phi nodes and selects, we will end up at
  /// the corresponding values B in the map.
  /// The utility will create new Phis/Selects if needed.
  // The simple example looks as follows:
  // BB1:
  //   p1 = b1 + 40
  //   br cond BB2, BB3
  // BB2:
  //   p2 = b2 + 40
  //   br BB3
  // BB3:
  //   p = phi [p1, BB1], [p2, BB2]
  //   v = load p
  // Map is
  //   p1 -> b1
  //   p2 -> b2
  // Request is
  //   p -> ?
  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
  Value *findCommon(FoldAddrToValueMapping &Map) {
    // Tracks the simplification of newly created phi nodes. The reason we use
    // this mapping is because we will add newly created Phi nodes in
    // AddrToBase. Simplification of Phi nodes is recursive, so some Phi node
    // may be simplified after we added it to AddrToBase. In reality this
    // simplification is possible only if the original phis/selects were not
    // simplified yet.
    // Using this mapping we can find the current value in AddrToBase.
    SimplificationTracker ST(SQ);

    // First step, DFS to create PHI nodes for all intermediate blocks.
    // Also fill traverse order for the second step.
    SmallVector<Value *, 32> TraverseOrder;
    InsertPlaceholders(Map, TraverseOrder, ST);

    // Second Step, fill new nodes by merged values and simplify if possible.
    FillPlaceholders(Map, TraverseOrder, ST);

    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    // Now we'd like to match the new Phi nodes to existing ones.
    unsigned PhiNotMatchedCount = 0;
    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
      ST.destroyNewNodes(CommonType);
      return nullptr;
    }

    auto *Result = ST.Get(Map.find(Original)->second);
    if (Result) {
      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
    }
    return Result;
  }
  /// Try to match PHI node to Candidate.
  /// Matcher tracks the matched Phi nodes.
  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
                    SmallSetVector<PHIPair, 8> &Matcher,
                    PhiNodeSet &PhiNodesToMatch) {
    SmallVector<PHIPair, 8> WorkList;
    Matcher.insert({PHI, Candidate});
    SmallSet<PHINode *, 8> MatchedPHIs;
    MatchedPHIs.insert(PHI);
    WorkList.push_back({PHI, Candidate});
    SmallSet<PHIPair, 8> Visited;
    while (!WorkList.empty()) {
      auto Item = WorkList.pop_back_val();
      if (!Visited.insert(Item).second)
        continue;
      // We iterate over all incoming values to Phi to compare them.
      // If the values are different but both are Phi nodes, the first one is
      // a Phi we added (subject to match), and both are in the same basic
      // block, then we can match our pair if the values match. So we state
      // that these values match and add the pair to the work list to verify
      // that.
      for (auto *B : Item.first->blocks()) {
        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
        if (FirstValue == SecondValue)
          continue;

        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);

        // If one of them is not a Phi, or the first one is not a Phi node
        // from the set we'd like to match, or the Phi nodes are from
        // different basic blocks, then we will not be able to match.
        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
            FirstPhi->getParent() != SecondPhi->getParent())
          return false;

        // If we already matched them then continue.
        if (Matcher.count({FirstPhi, SecondPhi}))
          continue;
        // So the values are different and do not match. So we need them to
        // match. (But we register no more than one match per PHI node, so
        // that we won't later try to replace them twice.)
        if (MatchedPHIs.insert(FirstPhi).second)
          Matcher.insert({FirstPhi, SecondPhi});
        // But we must check it.
        WorkList.push_back({FirstPhi, SecondPhi});
      }
    }
    return true;
  }
  /// For the given set of PHI nodes (in the SimplificationTracker) try
  /// to find their equivalents.
  /// Returns false if this matching fails and creation of new Phi is
  /// disabled.
  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
                   unsigned &PhiNotMatchedCount) {
    // Matched and PhiNodesToMatch iterate their elements in a deterministic
    // order, so the replacements (ReplacePhi) are also done in a
    // deterministic order.
    SmallSetVector<PHIPair, 8> Matched;
    SmallPtrSet<PHINode *, 8> WillNotMatch;
    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
    while (PhiNodesToMatch.size()) {
      PHINode *PHI = *PhiNodesToMatch.begin();

      // Add ourselves, so that if no Phi node in the basic block matches, we
      // are recorded as non-matching.
      WillNotMatch.clear();
      WillNotMatch.insert(PHI);

      // Traverse all Phis until we find an equivalent or fail to do so.
      bool IsMatched = false;
      for (auto &P : PHI->getParent()->phis()) {
        // Skip new Phi nodes.
        if (PhiNodesToMatch.count(&P))
          continue;
        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
          break;
        // If it does not match, collect all Phi nodes from the matcher.
        // If we end up with no match, then all these Phi nodes will not
        // match later.
        for (auto M : Matched)
          WillNotMatch.insert(M.first);
        Matched.clear();
      }
      if (IsMatched) {
        // Replace all matched values and erase them.
        for (auto MV : Matched)
          ST.ReplacePhi(MV.first, MV.second);
        Matched.clear();
        continue;
      }
      // If we are not allowed to create new nodes then bail out.
      if (!AllowNewPhiNodes)
        return false;
      // Just remove all seen values in matcher. They will not match anything.
      PhiNotMatchedCount += WillNotMatch.size();
      for (auto *P : WillNotMatch)
        PhiNodesToMatch.erase(P);
    }
    return true;
  }
  /// Fill the placeholders with values from predecessors and simplify them.
  void FillPlaceholders(FoldAddrToValueMapping &Map,
                        SmallVectorImpl<Value *> &TraverseOrder,
                        SimplificationTracker &ST) {
    while (!TraverseOrder.empty()) {
      Value *Current = TraverseOrder.pop_back_val();
      assert(Map.contains(Current) && "No node to fill!!!");
      Value *V = Map[Current];

      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
        // CurrentValue also must be Select.
        auto *CurrentSelect = cast<SelectInst>(Current);
        auto *TrueValue = CurrentSelect->getTrueValue();
        assert(Map.contains(TrueValue) && "No True Value!");
        Select->setTrueValue(ST.Get(Map[TrueValue]));
        auto *FalseValue = CurrentSelect->getFalseValue();
        assert(Map.contains(FalseValue) && "No False Value!");
        Select->setFalseValue(ST.Get(Map[FalseValue]));
      } else {
        // Must be a Phi node then.
        auto *PHI = cast<PHINode>(V);
        // Fill the Phi node with values from predecessors.
        for (auto *B : predecessors(PHI->getParent())) {
          Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
          assert(Map.contains(PV) && "No predecessor Value!");
          PHI->addIncoming(ST.Get(Map[PV]), B);
        }
      }
      Map[Current] = ST.Simplify(V);
    }
  }
  /// Starting from the original value, recursively iterates over the def-use
  /// chain up to known ending values represented in a map. For each traversed
  /// phi/select inserts a placeholder Phi or Select.
  /// Reports all newly created Phi/Select nodes by adding them to the set.
  /// Also reports the order in which the values have been traversed.
  void InsertPlaceholders(FoldAddrToValueMapping &Map,
                          SmallVectorImpl<Value *> &TraverseOrder,
                          SimplificationTracker &ST) {
    SmallVector<Value *, 32> Worklist;
    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
           "Address must be a Phi or Select node");
    auto *Dummy = PoisonValue::get(CommonType);
    Worklist.push_back(Original);
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      // If it is already visited or it is an ending value then skip it.
      if (Map.contains(Current))
        continue;
      TraverseOrder.push_back(Current);

      // CurrentValue must be a Phi node or select. All others must be covered
      // by anchors.
      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
        // Is it OK to get metadata from OrigSelect?!
        // Create a Select placeholder with dummy value.
        SelectInst *Select =
            SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
                               CurrentSelect->getName(),
                               CurrentSelect->getIterator(), CurrentSelect);
        Map[Current] = Select;
        ST.insertNewSelect(Select);
        // We are interested in True and False values.
        Worklist.push_back(CurrentSelect->getTrueValue());
        Worklist.push_back(CurrentSelect->getFalseValue());
      } else {
        // It must be a Phi node then.
        PHINode *CurrentPhi = cast<PHINode>(Current);
        unsigned PredCount = CurrentPhi->getNumIncomingValues();
        PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi",
                                       CurrentPhi->getIterator());
        Map[Current] = PHI;
        ST.insertNewPhi(PHI);
        append_range(Worklist, CurrentPhi->incoming_values());
      }
    }
  }
  bool addrModeCombiningAllowed() {
    if (DisableComplexAddrModes)
      return false;
    switch (DifferentField) {
    default:
      return false;
    case ExtAddrMode::BaseRegField:
      return AddrSinkCombineBaseReg;
    case ExtAddrMode::BaseGVField:
      return AddrSinkCombineBaseGV;
    case ExtAddrMode::BaseOffsField:
      return AddrSinkCombineBaseOffs;
    case ExtAddrMode::ScaledRegField:
      return AddrSinkCombineScaledReg;
    }
  }
};

} // end anonymous namespace
/// Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return matchAddr(ScaleReg, Depth);
  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode. If we found available IV increment, do
  // not go any further: we can reuse it and cannot eliminate it.
  ConstantInt *CI = nullptr;
  Value *AddLHS = nullptr;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
      !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
    TestAddrMode.InBounds = false;
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
    // Restore status quo.
    TestAddrMode = AddrMode;
  }
  // If this is an add recurrence with a constant step, return the increment
  // instruction and the canonicalized step.
  auto GetConstantStep =
      [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
    auto *PN = dyn_cast<PHINode>(V);
    if (!PN)
      return std::nullopt;
    auto IVInc = getIVIncrement(PN, &LI);
    if (!IVInc)
      return std::nullopt;
    // TODO: The result of the intrinsics above is two's complement. However,
    // when IV inc is expressed as add or sub, iv.next is potentially a poison
    // value. If it has nuw or nsw flags, we need to make sure that these
    // flags are inferrable at the point of the memory instruction. Otherwise
    // we are replacing a well-defined two's complement computation with
    // poison. Currently, to avoid the potentially complex analysis needed to
    // prove this, we reject such cases.
    if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
      if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
        return std::nullopt;
    if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
      return std::make_pair(IVInc->first, ConstantStep->getValue());
    return std::nullopt;
  };
  // Try to account for the following special case:
  // 1. ScaleReg is an inductive variable;
  // 2. We use it with non-zero offset;
  // 3. IV's increment is available at the point of memory instruction.
  //
  // In this case, we may reuse the IV increment instead of the IV Phi to
  // achieve the following advantages:
  // 1. If IV step matches the offset, we will have no need in the offset;
  // 2. Even if they don't match, we will reduce the overlap of living IV
  //    and IV increment, that will potentially lead to better register
  //    assignment.
  if (AddrMode.BaseOffs) {
    if (auto IVStep = GetConstantStep(ScaleReg)) {
      Instruction *IVInc = IVStep->first;
      // The following assert is important to ensure a lack of infinite loops.
      // This transform is (intentionally) the inverse of the one just above.
      // If they don't agree on the definition of an increment, we'd alternate
      // back and forth indefinitely.
      assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
      APInt Step = IVStep->second;
      APInt Offset = Step * AddrMode.Scale;
      if (Offset.isSignedIntN(64)) {
        TestAddrMode.InBounds = false;
        TestAddrMode.ScaledReg = IVInc;
        TestAddrMode.BaseOffs -= Offset.getLimitedValue();
        // If this addressing mode is legal, commit it.
        // (Note that we defer the (expensive) domtree base legality check
        // to the very last possible point.)
        if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
            getDTFn().dominates(IVInc, MemoryInst)) {
          AddrModeInsts.push_back(cast<Instruction>(IVInc));
          AddrMode = TestAddrMode;
          return true;
        }
        // Restore status quo.
        TestAddrMode = AddrMode;
      }
    }
  }

  // Otherwise, just return what we have.
  return true;
}
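// Worked example (illustrative, not from a test): with Scale == 2 and
// ScaleReg defined as `%s = add i64 %x, 4`, the X+C fold above rewrites
//   BaseReg + 2 * %s    ==>   BaseReg + 2 * %x + 8
// i.e. ScaledReg becomes %x and BaseOffs grows by C * Scale, provided the
// target reports the resulting mode as legal.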
/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isIntOrPtrTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}
/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                       const DataLayout &DL, Value *Val) {
  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
  if (!PromotedInst)
    return false;
  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
  // If the ISDOpcode is undefined, it was undefined before the promotion.
  if (!ISDOpcode)
    return true;
  // Otherwise, check if the promoted instruction is legal or not.
  return TLI.isOperationLegalOrCustom(
      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
}
namespace {

/// Helper class to perform type promotion.
class TypePromotionHelper {
  /// Utility function to add a promoted instruction \p ExtOpnd to
  /// \p PromotedInsts and record the type of extension we have seen.
  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
                              Instruction *ExtOpnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
    if (It != PromotedInsts.end()) {
      // If the new extension is the same as the original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
      if (It->second.getInt() == ExtTy)
        return;

      // Now the new extension is different from the old extension, so we make
      // the type information invalid by setting the extension type to
      // BothExtension.
      ExtTy = BothExtension;
    }
    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
  }
  /// Utility function to query the original type of instruction \p Opnd
  /// with a matched extension type. If the extension doesn't match, we
  /// cannot use the information we had on the original type.
  /// BothExtension doesn't match any extension type.
  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
                                 Instruction *Opnd, bool IsSExt) {
    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
    InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
    if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
      return It->second.getPointer();
    return nullptr;
  }
  /// Utility function to check whether or not a sign or zero extension
  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
  /// either using the operands of \p Inst or promoting \p Inst.
  /// The type of the extension is defined by \p IsSExt.
  /// In other words, check if:
  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
  /// #1 Promotion applies:
  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
  /// #2 Operand reuses:
  /// ext opnd1 to ConsideredExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
                            const InstrToOrigTy &PromotedInsts, bool IsSExt);
  /// Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
    return !(isa<SelectInst>(Inst) && OpIdx == 0);
  }
  /// Utility function to promote the operand of \p Ext when this
  /// operand is a promotable trunc or sext or zext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForTruncAndAnyExt(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
  /// Utility function to promote the operand of \p Ext when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInstsCost[out] contains the cost of all the instructions
  /// created to promote the operand of Ext.
  /// Newly added extensions are inserted in \p Exts.
  /// Newly added truncates are inserted in \p Truncs.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of Ext.
  static Value *promoteOperandForOther(Instruction *Ext,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInstsCost,
                                       SmallVectorImpl<Instruction *> *Exts,
                                       SmallVectorImpl<Instruction *> *Truncs,
                                       const TargetLowering &TLI, bool IsSExt);
  /// \see promoteOperandForOther.
  static Value *signExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, true);
  }
  /// \see promoteOperandForOther.
  static Value *zeroExtendOperandForOther(
      Instruction *Ext, TypePromotionTransaction &TPT,
      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
      SmallVectorImpl<Instruction *> *Exts,
      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
                                  Exts, Truncs, TLI, false);
  }
public:
  /// Type for the utility function that promotes the operand of Ext.
  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
                            InstrToOrigTy &PromotedInsts,
                            unsigned &CreatedInstsCost,
                            SmallVectorImpl<Instruction *> *Exts,
                            SmallVectorImpl<Instruction *> *Truncs,
                            const TargetLowering &TLI);
  /// Given a sign/zero extend instruction \p Ext, return the appropriate
  /// action to promote the operand of \p Ext instead of using Ext.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedInsts keeps track of all the instructions inserted by the
  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};

} // end anonymous namespace
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredExtType,
                                        const InstrToOrigTy &PromotedInsts,
                                        bool IsSExt) {
  // The promotion helper does not know how to deal with vector types yet.
  // To be able to fix that, we would need to fix the places where we
  // statically extend, e.g., constants and such.
  if (Inst->getType()->isVectorTy())
    return false;

  // We can always get through zext.
  if (isa<ZExtInst>(Inst))
    return true;

  // sext(sext) is ok too.
  if (IsSExt && isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words,
  // the binary operator must have a nuw or nsw flag.
  if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
    if (isa<OverflowingBinaryOperator>(BinOp) &&
        ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
         (IsSExt && BinOp->hasNoSignedWrap())))
      return true;

  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
  if ((Inst->getOpcode() == Instruction::And ||
       Inst->getOpcode() == Instruction::Or))
    return true;

  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
  if (Inst->getOpcode() == Instruction::Xor) {
    // Make sure it is not a NOT.
    if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
      if (!Cst->getValue().isAllOnes())
        return true;
  }

  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
    return true;

  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // It may change a poisoned value into a regular value, like
  //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
  //          poisoned value                    regular value
  // It should be OK since undef covers valid value.
  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
    const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
    if (ExtInst->hasOneUse()) {
      const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
      if (AndInst && AndInst->getOpcode() == Instruction::And) {
        const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
        if (Cst &&
            Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
          return true;
      }
    }
  }

  // Check if we can do the following simplification.
  // ext(trunc(opnd)) --> ext(opnd)
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the extension.
  // If the type is larger than the result type of the extension, we cannot.
  if (!OpndVal->getType()->isIntegerTy() ||
      OpndVal->getType()->getIntegerBitWidth() >
          ConsideredExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constant but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops extended bits of the same kind of
  // the extension.
  // #1 get the type of the operand and check the kind of the extended bits.
  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (OpndType)
    ;
  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
    OpndType = Opnd->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops extended bits.
  return Inst->getType()->getIntegerBitWidth() >=
         OpndType->getIntegerBitWidth();
}
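// Worked example (illustrative IR): for a sign extension, the nuw/nsw rule
// above lets the promotion machinery rewrite
//   %add = add nsw i32 %a, %b
//   %res = sext i32 %add to i64
// into
//   %a64 = sext i32 %a to i64
//   %b64 = sext i32 %b to i64
//   %res = add nsw i64 %a64, %b64
// because nsw guarantees that sign-extending the operands first yields the
// same value.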
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *Ext, const SetOfInstrs &InsertedInsts,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
         "Unexpected instruction type");
  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
  Type *ExtTy = Ext->getType();
  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
    return nullptr;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
    return nullptr;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
      isa<ZExtInst>(ExtOpnd))
    return promoteOperandForTruncAndAnyExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
    return nullptr;
  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
}
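// Illustrative sketch (hypothetical caller, simplified from how the pass
// drives promotion): getAction() selects the handler, which is then invoked
// through the transaction so the whole promotion stays rollback-able.
//
//   unsigned Cost = 0;
//   if (TypePromotionHelper::Action Promote = TypePromotionHelper::getAction(
//           Ext, InsertedInsts, TLI, PromotedInsts)) {
//     Value *Promoted = Promote(Ext, TPT, PromotedInsts, Cost,
//                               /*Exts=*/nullptr, /*Truncs=*/nullptr, TLI);
//     // ... Promoted now stands in for Ext; roll back if unprofitable ...
//   }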
Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
    Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
  // By construction, the operand of SExt is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  Value *ExtVal = SExt;
  bool HasMergedNonFreeExt = false;
  if (isa<ZExtInst>(SExtOpnd)) {
    // Replace s|zext(zext(opnd))
    // => zext(opnd).
    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
    Value *ZExt =
        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
    TPT.replaceAllUsesWith(SExt, ZExt);
    TPT.eraseInstruction(SExt);
    ExtVal = ZExt;
  } else {
    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
    // => z|sext(opnd).
    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  }
  CreatedInstsCost = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the extension is still needed.
  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
    if (ExtInst) {
      if (Exts)
        Exts->push_back(ExtInst);
      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
    }
    return ExtVal;
  }

  // At this point we have: ext ty opnd to ty.
  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
  Value *NextVal = ExtInst->getOperand(0);
  TPT.eraseInstruction(ExtInst, NextVal);
  return NextVal;
}
Value *TypePromotionHelper::promoteOperandForOther(
    Instruction *Ext, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
    SmallVectorImpl<Instruction *> *Exts,
    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
    bool IsSExt) {
  // By construction, the operand of Ext is an instruction. Otherwise we
  // cannot get through it and this method should not be called.
  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
  CreatedInstsCost = 0;
  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
      // Insert it just after the definition.
      ITrunc->moveAfter(ExtOpnd);
      if (Truncs)
        Truncs->push_back(ITrunc);
    }

    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
    // Restore the operand of Ext (which has been replaced by the previous
    // call to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
    TPT.setOperand(Ext, 0, ExtOpnd);
  }

  // Get through the Instruction:
  // 1. Update its type.
  // 2. Replace the uses of Ext by Inst.
  // 3. Extend each operand that needs to be extended.
  //
  // Remember the original type of the instruction before promotion.
  // This is useful to know that the high bits are sign extended bits.
  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
  // Step #1.
  TPT.mutateType(ExtOpnd, Ext->getType());
  // Step #2.
  TPT.replaceAllUsesWith(Ext, ExtOpnd);
  // Step #3.
  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
       ++OpIdx) {
    LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
        !shouldExtOperand(ExtOpnd, OpIdx)) {
      LLVM_DEBUG(dbgs() << "No need to propagate\n");
      continue;
    }
    // Check if we can statically extend the operand.
    Value *Opnd = ExtOpnd->getOperand(OpIdx);
    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
                            : Cst->getValue().zext(BitWidth);
      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
      continue;
    }
    // UndefValue are typed, so we have to statically sign extend them.
    if (isa<UndefValue>(Opnd)) {
      LLVM_DEBUG(dbgs() << "Statically extend\n");
      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
      continue;
    }

    // Otherwise we have to explicitly sign extend the operand.
    Value *ValForExtOpnd = IsSExt
                               ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
                               : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
    TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
    Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
    if (!InstForExtOpnd)
      continue;

    if (Exts)
      Exts->push_back(InstForExtOpnd);

    CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
  }
  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
  TPT.eraseInstruction(Ext);
  return ExtOpnd;
}
/// Check whether or not promoting an instruction to a wider type is
/// profitable.
/// \p NewCost gives the cost of extension instructions created by the
/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been
/// matched in the addressing mode thanks to the promotion.
/// \p PromotedOperand is the value that has been promoted.
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
                    << '\n');
  // The cost of the new extensions is greater than the cost of the
  // old extension plus what we folded.
  // This is not profitable.
  if (NewCost > OldCost)
    return false;
  if (NewCost < OldCost)
    return true;
  // The promotion is neutral but it may help folding the sign extension in
  // loads for instance.
  // Check that we did not create an illegal instruction.
  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
}
/// Given an instruction or constant expr, see if we can fold the operation
/// into the addressing mode. If so, update the addressing mode and return
/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it contains the information of whether or
/// not AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
/// This state can happen when AddrInst is a sext, since it may be moved away.
/// Therefore, AddrInst may not be valid when MovedAway is true and it must
/// not be referenced anymore.
bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth,
                                               bool *MovedAway) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5)
    return false;

  // By default, all matched instructions stay in place.
  if (MovedAway)
    *MovedAway = false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer
    // sized.
    return matchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr: {
    auto AS = AddrInst->getType()->getPointerAddressSpace();
    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
        // Don't touch identity bitcasts. These were probably put here by LSR,
        // and we don't want to mess around with them. Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::AddrSpaceCast: {
    unsigned SrcAS =
        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
    if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
      return matchAddr(AddrInst->getOperand(0), Depth);
    return false;
  }
  case Instruction::Add: {
    // Check to see if we can merge in one operand, then the other. If so, we
    // win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo partially
    // matched operation.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();

    // Try to match an integer constant second to increase its chance of ending
    // up in `BaseOffs` and decrease its chance of ending up in `BaseReg`.
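    // (For instance, for `add i64 %x, 42` the operands are visited as %x
    // first and 42 second, so the constant folds into the immediate offset
    // rather than occupying the base register.)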
    int First = 0, Second = 1;
    if (isa<ConstantInt>(AddrInst->getOperand(First)) &&
        !isa<ConstantInt>(AddrInst->getOperand(Second)))
      std::swap(First, Second);
    AddrMode.InBounds = false;
    if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
        matchAddr(AddrInst->getOperand(Second), Depth + 1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);

    // Otherwise this was over-aggressive. Try merging operands in the opposite
    // order.
    if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
        matchAddr(AddrInst->getOperand(First), Depth + 1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    TPT.rollback(LastKnownGood);
    break;
  }
  // case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
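  // (An OR is equivalent to an ADD when the operands have no bits in common;
  // e.g. `or (shl %x, 4), 3` sets only bits known to be zero in the shifted
  // value, so it computes the same result as the corresponding `add`.
  // ValueTracking's haveNoCommonBitsSet captures this condition.)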
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    AddrMode.InBounds = false;
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS || RHS->getBitWidth() > 64)
      return false;
    int64_t Scale = Opcode == Instruction::Shl
                        ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
                        : RHS->getSExtValue();

    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP. We check whether it contains constant offsets and at most
    // one variable offset.
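    // (For example, `getelementptr %struct.S, ptr %base, i64 0, i32 2, i64 %i`
    // has constant parts that accumulate into ConstantOffset and a single
    // variable index %i; %struct.S is a placeholder type for illustration.)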
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        TypeSize TS = GTI.getSequentialElementStride(DL);
        if (TS.isNonZero()) {
          // The optimisations below currently only work for fixed offsets.
          if (TS.isScalable())
            return false;
          int64_t TypeSize = TS.getFixedValue();
          if (ConstantInt *CI =
                  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
            const APInt &CVal = CI->getValue();
            if (CVal.getSignificantBits() <= 64) {
              ConstantOffset += CVal.getSExtValue() * TypeSize;
              continue;
            }
          }
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
        if (!cast<GEPOperator>(AddrInst)->isInBounds())
          AddrMode.InBounds = false;
        return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;

      if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
          TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
          ConstantOffset > 0) {
        // Record GEPs with non-zero offsets as candidates for splitting in
        // the event that the offset cannot fit into the r+i addressing mode.
        // Simple and common case that only one GEP is used in calculating the
        // address for the memory access.
        Value *Base = AddrInst->getOperand(0);
        auto *BaseI = dyn_cast<Instruction>(Base);
        auto *GEP = cast<GetElementPtrInst>(AddrInst);
        if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
            (BaseI && !isa<CastInst>(BaseI) &&
             !isa<GetElementPtrInst>(BaseI))) {
          // Make sure the parent block allows inserting non-PHI instructions
          // before the terminator.
          BasicBlock *Parent = BaseI ? BaseI->getParent()
                                     : &GEP->getFunction()->getEntryBlock();
          if (!Parent->getTerminator()->isEHPad())
            LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
        }
      }

      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;
    if (!cast<GEPOperator>(AddrInst)->isInBounds())
      AddrMode.InBounds = false;

    // Match the base operand of the GEP.
    if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
    if (!Ext)
      return false;

    // Try to move this ext out of the way of the addressing mode.
    // Ask for a method for doing so.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
    if (!TPH)
      return false;

    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    unsigned CreatedInstsCost = 0;
    unsigned ExtCost = !TLI.isExtFree(Ext);
    Value *PromotedOperand =
        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // SExt has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
    //
    // E.g.,
    // op = add opnd, 1
    // idx = ext op
    // addr = gep base, idx
    // is now:
    // promotedOpnd = ext opnd            <- no match here
    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
    // addr = gep base, op                <- match
    if (MovedAway)
      *MovedAway = true;

    assert(PromotedOperand &&
           "TypePromotionHelper should have filtered out those cases");

    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    if (!matchAddr(PromotedOperand, Depth) ||
        // The total of the new cost is equal to the cost of the created
        // instructions.
        // The total of the old cost is equal to the cost of the extension plus
        // what we have saved in the addressing mode.
        !isPromotionProfitable(CreatedInstsCost,
                               ExtCost + (AddrModeInsts.size() - OldSize),
                               PromotedOperand)) {
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
      TPT.rollback(LastKnownGood);
      return false;
    }
    return true;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
      if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
        GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
        if (TLI.addressingModeSupportsTLS(GV))
          return matchAddr(AddrInst->getOperand(0), Depth);
      }
    }
    break;
  }
  return false;
}

/// If we can, try to add the value of 'Addr' into the current addressing mode.
/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
/// unmodified. This assumes that Addr is either a pointer type or intptr_t
/// for the target.
///
bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
  // Start a transaction at this point that we will rollback if the matching
  // fails.
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    if (CI->getValue().isSignedIntN(64)) {
      // Fold in immediates if legal for the target.
      AddrMode.BaseOffs += CI->getSExtValue();
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseOffs -= CI->getSExtValue();
    }
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (!AddrMode.BaseGV) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
        return true;
      AddrMode.BaseGV = nullptr;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    bool MovedAway = false;
    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
      // This instruction may have been moved away. If so, there is nothing
      // to check.
      if (MovedAway)
        return true;
      // Okay, it's possible to fold this. Check to see if it is actually
      // *profitable* to do so. We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      TPT.rollback(LastKnownGood);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
    TPT.rollback(LastKnownGood);
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = nullptr;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = nullptr;
  }

  // Couldn't match.
  TPT.rollback(LastKnownGood);
  return false;
}

/// Check to see if all uses of OpVal by the specified inline asm call are due
/// to memory operands. If so, return true, otherwise return false.
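/// (Illustrative, not from this file: in
///   call void asm "prefetcht0 $0", "*m"(ptr elementtype(i8) %p)
/// the use of %p is an indirect C_Memory operand, so an address computation
/// feeding %p can still be folded.)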
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI,
                                    const TargetRegisterInfo &TRI) {
  const Function *F = CI->getFunction();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);

  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it! TODO: Also handle C_Address?
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
/// Add accessed addresses and types to MemoryUses.
static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
    const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I).second)
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Use &U : I->uses()) {
    // Conservatively return true if we're seeing a large number or a deep chain
    // of users. This avoids excessive compilation times in pathological cases.
    if (SeenInsts++ >= MaxAddressUsersToScan)
      return true;

    Instruction *UserI = cast<Instruction>(U.getUser());
    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
      MemoryUses.push_back({&U, LI->getType()});
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
      if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
      continue;
    }

    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
      if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
      continue;
    }

    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
      if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
        return true; // Storing addr, not into addr.
      MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
      if (CI->hasFnAttr(Attribute::Cold)) {
        // If this is a cold call, we can sink the addressing calculation into
        // the cold path. See optimizeCallInst
        bool OptForSize =
            OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
        if (!OptForSize)
          continue;
      }

      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
      if (!IA)
        return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                          PSI, BFI, SeenInsts))
      return true;
  }

  return false;
}

static bool FindAllMemoryUses(
    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
    const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
    ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  unsigned SeenInsts = 0;
  SmallPtrSet<Instruction *, 16> ConsideredInsts;
  return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
                           PSI, BFI, SeenInsts);
}

/// Return true if Val is already known to be live at the use site that we're
/// folding it into. If so, there is no cost to include it in the addressing
/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
/// instruction already.
bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
                                                   Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val))
    return true;

  // If Val is a constant sized alloca in the entry block, it is live, this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block. If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// It is possible for the addressing mode of the machine to fold the specified
/// instruction into a load or store that ultimately uses it.
/// However, the specified instruction has multiple uses.
/// Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
    Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
  if (IgnoreProfitability)
    return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded. Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = nullptr;
  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = nullptr;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (!BaseReg && !ScaledReg)
    return true;

  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst.) In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
  SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
  if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
    return false; // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction. The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fastpath. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonable small and fast way to
  // compute an effective address. (e.g. LEA on x86)
  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
  for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
    Value *Address = Pair.first->get();
    Instruction *UserI = cast<Instruction>(Pair.first->getUser());
    Type *AddressAccessTy = Pair.second;
    unsigned AS = Address->getType()->getPointerAddressSpace();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
                                  AddressAccessTy, AS, UserI, Result,
                                  InsertedInsts, PromotedInsts, TPT,
                                  LargeOffsetGEP, OptSize, PSI, BFI);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.matchAddr(Address, 0);
    (void)Success;
    assert(Success && "Couldn't select *anything*?");

    // The match was to check the profitability, the changes made are not
    // part of the original matcher. Therefore, they should be dropped
    // otherwise the original matcher will not present the right state.
    TPT.rollback(LastKnownGood);

    // If the match didn't cover I, then it won't be shared by it.
    if (!is_contained(MatchedAddrModeInsts, I))
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

/// Return true if the specified values are defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// Sink addressing mode computation immediately before MemoryInst if doing so
/// can be done without increasing register pressure. The need for the
/// register pressure constraint means this can end up being an all or nothing
/// decision for all uses of the same addressing computation.
///
/// Load and Store Instructions often have addressing modes that can do
/// significant amounts of computation. As such, instruction selection will try
/// to get the load or store to do as much computation as possible for the
/// program. The problem is that isel can only see within a single block. As
/// such, we sink as much legal addressing mode work into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands. It's also used to sink addressing computations feeding into cold
/// call sites into their (cold) basic block.
///
/// The motivation for handling sinking into cold blocks is that doing so can
/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above), and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
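/// (A minimal sketch of the effect, not from the source: given
///   bb1:  %a = add i64 %x, 16
///   bb2:  %p = inttoptr i64 %a to ptr
///         %v = load i32, ptr %p
/// the address math is re-materialized as "sunkaddr" instructions right
/// before the load, so instruction selection can fold it into the load's
/// addressing mode.)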
bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy, unsigned AddrSpace) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes. This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value *, 8> worklist;
  SmallPtrSet<Value *, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing mode obtained from the non-PHI/select roots of
  // the graph are compatible.
  bool PhiOrSelectSeen = false;
  SmallVector<Instruction *, 16> AddrModeInsts;
  const SimplifyQuery SQ(*DL, TLInfo);
  AddressingModeCombiner AddrModes(SQ, Addr);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  while (!worklist.empty()) {
    Value *V = worklist.pop_back_val();

    // We allow traversing cyclic Phi nodes.
    // In case of success after this loop we ensure that traversing through
    // Phi nodes ends up with all cases to compute address of the form
    //    BaseGV + Base + Scale * Index + Offset
    // where Scale and Offset are constants and BaseGV, Base and Index
    // are exactly the same Values in all cases.
    // It means that BaseGV, Scale and Offset dominate our memory instruction
    // and have the same value as they had in address computation represented
    // as Phi. So we can safely sink address computation to memory instruction.
    if (!Visited.insert(V).second)
      continue;

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      append_range(worklist, P->incoming_values());
      PhiOrSelectSeen = true;
      continue;
    }
    // Similar for select.
    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
      worklist.push_back(SI->getFalseValue());
      worklist.push_back(SI->getTrueValue());
      PhiOrSelectSeen = true;
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed. Note that
    // the result may differ depending on what other uses our candidate
    // addressing instructions might have.
    AddrModeInsts.clear();
    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
                                                                      0);
    // Defer the query (and possible computation of) the dom tree to point of
    // actual use. It's expected that most address matches don't actually need
    // the domtree.
    auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
      Function *F = MemoryInst->getParent()->getParent();
      return this->getDT(*F);
    };
    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
        *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
        BFI);

    GetElementPtrInst *GEP = LargeOffsetGEP.first;
    if (GEP && !NewGEPBases.count(GEP)) {
      // If splitting the underlying data structure can reduce the offset of a
      // GEP, collect the GEP. Skip the GEPs that are the new bases of
      // previously split data structures.
      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
      LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
    }

    NewAddrMode.OriginalValue = V;
    if (!AddrModes.addNewAddrMode(NewAddrMode))
      break;
  }

  // Try to combine the AddrModes we've collected. If we couldn't collect any,
  // or we have multiple but either couldn't combine them or combining them
  // wouldn't do anything useful, bail out now.
  if (!AddrModes.combineAddrModes()) {
    TPT.rollback(LastKnownGood);
    return false;
  }
  bool Modified = TPT.commit();

  // Get the combined AddrMode (or the only AddrMode, if we only had one).
  ExtAddrMode AddrMode = AddrModes.getAddrMode();

  // If all the instructions matched are already in this BB, don't do anything.
  // If we saw a Phi node then it is definitely not local, and if we saw a
  // select then we want to push the address calculation past it even if it's
  // already in this BB.
  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
        return IsNonLocalValue(V, MemoryInst->getParent());
      })) {
    LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
    return Modified;
  }
  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we determined the addressing expression we want to use and know
  // that we have to sink it into this block. Check to see if we have already
  // done this for some other load/store instr in this block. If so, reuse
  // the computation. Before attempting reuse, check if the address is valid
  // as it may have been erased.

  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
  if (SunkAddr) {
    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    if (SunkAddr->getType() != Addr->getType()) {
      if (SunkAddr->getType()->getPointerAddressSpace() !=
              Addr->getType()->getPointerAddressSpace() &&
          !DL->isNonIntegralPointerType(Addr->getType())) {
        // There are two reasons the address spaces might not match: a no-op
        // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
        // ptrtoint/inttoptr pair to ensure we match the original semantics.
        // TODO: allow bitcast between different address space pointers with the
        // same size.
        SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
        SunkAddr =
            Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
      } else
        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
    }
  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
                                   SubtargetInfo->addrSinkUsingGEPs())) {
    // By default, we use the GEP-based method when AA is used later. This
    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Value *ResultPtr = nullptr, *ResultIndex = nullptr;

    // First, find the pointer.
    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
      ResultPtr = AddrMode.BaseReg;
      AddrMode.BaseReg = nullptr;
    }

    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
      // We can't add more than one pointer together, nor can we scale a
      // pointer (both of which seem meaningless).
      if (ResultPtr || AddrMode.Scale != 1)
        return Modified;

      ResultPtr = AddrMode.ScaledReg;
      AddrMode.Scale = 0;
    }

    // It is only safe to sign extend the BaseReg if we know that the math
    // required to create it did not overflow before we extend it. Since
    // the original IR value was tossed in favor of a constant back when
    // the AddrMode was created we need to bail out gracefully if widths
    // do not match instead of extending it.
    //
    // (See below for code to add the scale.)
    if (AddrMode.Scale) {
      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
          cast<IntegerType>(ScaledRegTy)->getBitWidth())
        return Modified;
    }

    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      if (ResultPtr)
        return Modified;

      if (BaseGV->isThreadLocal()) {
        ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        ResultPtr = BaseGV;
      }
    }

    // If the real base value actually came from an inttoptr, then the matcher
    // will look through it and provide only the integer value. In that case,
    // use it here.
    if (!DL->isNonIntegralPointerType(Addr->getType())) {
      if (!ResultPtr && AddrMode.BaseReg) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.BaseReg = nullptr;
      } else if (!ResultPtr && AddrMode.Scale == 1) {
        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
                                           "sunkaddr");
        AddrMode.Scale = 0;
      }
    }

    if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
        !AddrMode.BaseOffs) {
      SunkAddr = Constant::getNullValue(Addr->getType());
    } else if (!ResultPtr) {
      return Modified;
    } else {
      Type *I8PtrTy =
          Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
      // Start with the base register. Do this first so that subsequent address
      // matching finds it last, which will prevent it from trying to match it
      // as the scaled value in case it happens to be a mul. That would be
      // problematic if we've sunk a different mul for the scale, because then
      // we'd end up sinking both muls.
      if (AddrMode.BaseReg) {
        Value *V = AddrMode.BaseReg;
        if (V->getType() != IntPtrTy)
          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");

        ResultIndex = V;
      }

      // Add the scale value.
      if (AddrMode.Scale) {
        Value *V = AddrMode.ScaledReg;
        if (V->getType() == IntPtrTy) {
          // done.
        } else {
          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
                     cast<IntegerType>(V->getType())->getBitWidth() &&
                 "We can't transform if ScaledReg is too narrow");
          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
        }

        if (AddrMode.Scale != 1)
          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                                "sunkaddr");
        if (ResultIndex)
          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
        else
          ResultIndex = V;
      }

      // Add in the Base Offset if present.
      if (AddrMode.BaseOffs) {
        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
        if (ResultIndex) {
          // We need to add this separately from the scale above to help with
          // SDAG consecutive load/store merging.
          if (ResultPtr->getType() != I8PtrTy)
            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
          ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                           AddrMode.InBounds);
        }

        ResultIndex = V;
      }

      if (!ResultIndex) {
        SunkAddr = ResultPtr;
      } else {
        if (ResultPtr->getType() != I8PtrTy)
          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
        SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
                                        AddrMode.InBounds);
      }

      if (SunkAddr->getType() != Addr->getType()) {
        if (SunkAddr->getType()->getPointerAddressSpace() !=
                Addr->getType()->getPointerAddressSpace() &&
            !DL->isNonIntegralPointerType(Addr->getType())) {
          // There are two reasons the address spaces might not match: a no-op
          // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
          // ptrtoint/inttoptr pair to ensure we match the original semantics.
          // TODO: allow bitcast between different address space pointers with
          // the same size.
          SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
          SunkAddr =
              Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
        } else
          SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
      }
    }
  } else {
    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
    // non-integral pointers, so in that case bail out now.
    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
    if (DL->isNonIntegralPointerType(Addr->getType()) ||
        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
        (AddrMode.BaseGV &&
         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
      return Modified;

    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
                      << " for " << *MemoryInst << "\n");
    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
    Value *Result = nullptr;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        // It is only safe to sign extend the BaseReg if we know that the math
        // required to create it did not overflow before we extend it. Since
        // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
        Instruction *I = dyn_cast_or_null<Instruction>(Result);
        if (I && (Result != AddrMode.BaseReg))
          I->eraseFromParent();
        return Modified;
      }

      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    GlobalValue *BaseGV = AddrMode.BaseGV;
    if (BaseGV != nullptr) {
      Value *BaseGVPtr;
      if (BaseGV->isThreadLocal()) {
        BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
      } else {
        BaseGVPtr = BaseGV;
      }
      Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (!Result)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
  // Store the newly computed address into the cache. In the case we reused a
  // value, this should be idempotent.
  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
      RecursivelyDeleteTriviallyDeadInstructions(
          Repl, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
    });
  }
  return true;
}

/// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
/// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
/// only handle a 2 operand GEP in the same basic block or a splat constant
/// vector. The 2 operands to the GEP must have a scalar pointer and a vector
/// index.
///
/// If the existing GEP has a vector base pointer that is splat, we can look
/// through the splat to find the scalar pointer. If we can't find a scalar
/// pointer there's nothing we can do.
///
/// If we have a GEP with more than 2 indices where the middle indices are all
/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
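/// (A sketch of that rewrite, not taken from the source:
///   %gep  = getelementptr [4 x i32], ptr %p, i64 0, <8 x i64> %idx
/// becomes
///   %sgep = getelementptr [4 x i32], ptr %p, i64 0, i64 0
///   %gep  = getelementptr i32, ptr %sgep, <8 x i64> %idx
/// leaving a 2 operand GEP with a scalar base for SelectionDAGBuilder.)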
///
/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
/// followed by a GEP with an all zeroes vector index. This will enable
/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
/// zero index.
bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
                                               Value *Ptr) {
  Value *NewAddr;

  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    // Don't optimize GEPs that don't have indices.
    if (!GEP->hasIndices())
      return false;

    // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
    // FIXME: We should support this by sinking the GEP.
    if (MemoryInst->getParent() != GEP->getParent())
      return false;

    SmallVector<Value *, 2> Ops(GEP->operands());

    bool RewriteGEP = false;

    if (Ops[0]->getType()->isVectorTy()) {
      Ops[0] = getSplatValue(Ops[0]);
      if (!Ops[0])
        return false;
      RewriteGEP = true;
    }

    unsigned FinalIndex = Ops.size() - 1;

    // Ensure all but the last index is 0.
    // FIXME: This isn't strictly required. All that's required is that they
    // are all scalars or splats.
    for (unsigned i = 1; i < FinalIndex; ++i) {
      auto *C = dyn_cast<Constant>(Ops[i]);
      if (!C)
        return false;
      if (isa<VectorType>(C->getType()))
        C = C->getSplatValue();
      auto *CI = dyn_cast_or_null<ConstantInt>(C);
      if (!CI || !CI->isZero())
        return false;
      // Scalarize the index if needed.
      Ops[i] = CI;
    }

    // Try to scalarize the final index.
    if (Ops[FinalIndex]->getType()->isVectorTy()) {
      if (Value *V = getSplatValue(Ops[FinalIndex])) {
        auto *C = dyn_cast<ConstantInt>(V);
        // Don't scalarize all zeros vector.
        if (!C || !C->isZero()) {
          Ops[FinalIndex] = V;
          RewriteGEP = true;
        }
      }
    }

    // If we made any changes or we have extra operands, we need to generate
    // new instructions.
    if (!RewriteGEP && Ops.size() == 2)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    Type *SourceTy = GEP->getSourceElementType();
    Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());

    // If the final index isn't a vector, emit a scalar GEP containing all ops
    // and a vector GEP with all zeroes final index.
    if (!Ops[FinalIndex]->getType()->isVectorTy()) {
      NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
      auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
      auto *SecondTy = GetElementPtrInst::getIndexedType(
          SourceTy, ArrayRef(Ops).drop_front());
      NewAddr =
          Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
    } else {
      Value *Base = Ops[0];
      Value *Index = Ops[FinalIndex];

      // Create a scalar GEP if there are more than 2 operands.
      if (Ops.size() != 2) {
        // Replace the last index with 0.
        Ops[FinalIndex] =
            Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
        Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
        SourceTy = GetElementPtrInst::getIndexedType(
            SourceTy, ArrayRef(Ops).drop_front());
      }

      // Now create the GEP with scalar pointer and vector index.
      NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
    }
  } else if (!isa<Constant>(Ptr)) {
    // Not a GEP, maybe it's a splat and we can create a GEP to enable
    // SelectionDAGBuilder to use it as a uniform base.
    Value *V = getSplatValue(Ptr);
    if (!V)
      return false;

    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();

    IRBuilder<> Builder(MemoryInst);

    // Emit a vector GEP with a scalar pointer and all 0s vector index.
    Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
    auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
    Type *ScalarTy;
    if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
        Intrinsic::masked_gather) {
      ScalarTy = MemoryInst->getType()->getScalarType();
    } else {
      assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
             Intrinsic::masked_scatter);
      ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
    }
    NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
  } else {
    // Constant, SelectionDAGBuilder knows to check if it's a splat.
    return false;
  }

  MemoryInst->replaceUsesOfWith(Ptr, NewAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Ptr->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(
        Ptr, TLInfo, nullptr,
        [&](Value *V) { removeAllAssertingVHReferences(V); });

  return true;
}

/// If there are any memory operands, use OptimizeMemoryInst to sink their
/// address computing into the block when possible / profitable.
bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  const TargetRegisterInfo *TRI =
      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(*DL, TRI, *CS);
  unsigned ArgNo = 0;
  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    // TODO: Also handle C_Address?
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
  assert(!Val->use_empty() && "Input must have at least one use");
  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
  bool IsSExt = isa<SExtInst>(FirstUser);
  Type *ExtTy = FirstUser->getType();
  for (const User *U : Val->users()) {
    const Instruction *UI = cast<Instruction>(U);
    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
      return false;
    Type *CurTy = UI->getType();
    // Same input and output types: Same instruction after CSE.
    if (CurTy == ExtTy)
      continue;

    // If IsSExt is true, we are in this situation:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty1 a to ty3
    // Assuming ty2 is shorter than ty3, this could be turned into:
    // a = Val
    // b = sext ty1 a to ty2
    // c = sext ty2 b to ty3
    // However, the last sext is not free.
    if (IsSExt)
      return false;

    // This is a ZExt, maybe this is free to extend from one type to another.
    // In that case, we would not account for a different use.
    Type *NarrowTy;
    Type *LargeTy;
    if (ExtTy->getScalarType()->getIntegerBitWidth() >
        CurTy->getScalarType()->getIntegerBitWidth()) {
      NarrowTy = CurTy;
      LargeTy = ExtTy;
    } else {
      NarrowTy = ExtTy;
      LargeTy = CurTy;
    }

    if (!TLI.isZExtFree(NarrowTy, LargeTy))
      return false;
  }
  // All uses are the same or can be derived from one another for free.
  return true;
}

/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
/// them.
///
/// \return true if some promotion happened, false otherwise.
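/// (Illustrative sketch of the transformation, not from the source: given
///   %a = add nsw i16 %x, 1
///   %e = sext i16 %a to i32
/// the sext can be moved up through the add,
///   %px = sext i16 %x to i32
///   %e  = add nsw i32 %px, 1
/// so that an extension sitting directly on a load can later fuse with it.)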
bool CodeGenPrepare::tryToPromoteExts(
    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
    unsigned CreatedInstsCost) {
  bool Promoted = false;

  // Iterate over all the extensions to try to promote them.
  for (auto *I : Exts) {
    // Early check if we directly have ext(load).
    if (isa<LoadInst>(I->getOperand(0))) {
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Check whether or not we want to do any promotion. The reason we have
    // this check inside the for loop is to catch the case where an extension
    // is directly fed by a load because in such case the extension can be moved
    // up without any promotion on its operands.
    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
      return false;

    // Get the action to perform the promotion.
    TypePromotionHelper::Action TPH =
        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
    // Check if we can promote.
    if (!TPH) {
      // Save the current extension as we cannot move up through its operand.
      ProfitablyMovedExts.push_back(I);
      continue;
    }

    // Save the current state.
    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
        TPT.getRestorationPoint();
    SmallVector<Instruction *, 4> NewExts;
    unsigned NewCreatedInstsCost = 0;
    unsigned ExtCost = !TLI->isExtFree(I);
    // Promote.
    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
                             &NewExts, nullptr, *TLI);
    assert(PromotedVal &&
           "TypePromotionHelper should have filtered out those cases");

    // We would be able to merge only one extension in a load.
    // Therefore, if we have more than 1 new extension we heuristically
    // cut this search path, because it means we degrade the code quality.
    // With exactly 2, the transformation is neutral, because we will merge
    // one extension but leave one. However, we optimistically keep going,
    // because the new extension may be removed too. Also avoid replacing a
    // single free extension with multiple extensions, as this increases the
    // number of IR instructions while not providing any savings.
    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
    // FIXME: It would be possible to propagate a negative value instead of
    // conservatively ceiling it to 0.
    TotalCreatedInstsCost =
        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
    if (!StressExtLdPromotion &&
        (TotalCreatedInstsCost > 1 ||
         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
         (ExtCost == 0 && NewExts.size() > 1))) {
      // This promotion is not profitable, rollback to the previous state, and
      // save the current extension in ProfitablyMovedExts as the latest
      // speculative promotion turned out to be unprofitable.
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // Continue promoting NewExts as far as doing so is profitable.
    SmallVector<Instruction *, 2> NewlyMovedExts;
    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
    bool NewPromoted = false;
    for (auto *ExtInst : NewlyMovedExts) {
      Instruction *MovedExt = cast<Instruction>(ExtInst);
      Value *ExtOperand = MovedExt->getOperand(0);
      // If we have reached a load, we need this extra profitability check
      // as it could potentially be merged into an ext(load).
      if (isa<LoadInst>(ExtOperand) &&
          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
        continue;

      // The promotion in this chain is profitable.
      NewPromoted = true;
      ProfitablyMovedExts.push_back(MovedExt);
    }

    // If none of the speculative promotions for NewExts is profitable, rollback
    // and save the current extension (I) as the last profitable extension.
    if (!NewPromoted) {
      TPT.rollback(LastKnownGood);
      ProfitablyMovedExts.push_back(I);
      continue;
    }
    // The promotion is profitable.
    Promoted = true;
  }
  return Promoted;
}

/// Merging redundant sexts when one is dominating the other.
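/// (For example: when %s1 = sext i32 %v to i64 dominates an identical
/// %s2 = sext i32 %v to i64, the uses of %s2 are redirected to %s1 and %s2
/// is removed.)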
bool CodeGenPrepare::mergeSExts(Function &F) {
  bool Changed = false;
  for (auto &Entry : ValToSExtendedUses) {
    SExts &Insts = Entry.second;
    SExts CurPts;
    for (Instruction *Inst : Insts) {
      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
          Inst->getOperand(0) != Entry.first)
        continue;
      bool inserted = false;
      for (auto &Pt : CurPts) {
        if (getDT(F).dominates(Inst, Pt)) {
          replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
          RemovedInsts.insert(Pt);
          Pt->removeFromParent();
          Pt = Inst;
          inserted = true;
          Changed = true;
          break;
        }
        if (!getDT(F).dominates(Pt, Inst))
          // Give up if we need to merge in a common dominator as the
          // experiments show it is not profitable.
          break;
        replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
        RemovedInsts.insert(Inst);
        Inst->removeFromParent();
        inserted = true;
        Changed = true;
        break;
      }
      if (!inserted)
        CurPts.push_back(Inst);
    }
  }
  return Changed;
}

// Splitting large data structures so that the GEPs accessing them can have
// smaller offsets so that they can be sunk to the same blocks as their users.
// For example, a large struct starting from %base is split into two parts
// where the second part starts from %new_base.
//
// Before:
// BB0:
//   %base     =
//
// BB1:
//   %gep0     = gep %base, off0
//   %gep1     = gep %base, off1
//   %gep2     = gep %base, off2
//
// BB2:
//   %load1    = load %gep0
//   %load2    = load %gep1
//   %load3    = load %gep2
//
// After:
// BB0:
//   %base     =
//   %new_base = gep %base, off0
//
// BB1:
//   %new_gep0 = %new_base
//   %new_gep1 = gep %new_base, off1 - off0
//   %new_gep2 = gep %new_base, off2 - off0
//
// BB2:
//   %load1    = load i32, i32* %new_gep0
//   %load2    = load i32, i32* %new_gep1
//   %load3    = load i32, i32* %new_gep2
//
// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
// their offsets are small enough to fit into the addressing mode.
6257 bool CodeGenPrepare::splitLargeGEPOffsets() {
6258 bool Changed
= false;
6259 for (auto &Entry
: LargeOffsetGEPMap
) {
6260 Value
*OldBase
= Entry
.first
;
6261 SmallVectorImpl
<std::pair
<AssertingVH
<GetElementPtrInst
>, int64_t>>
6262 &LargeOffsetGEPs
= Entry
.second
;
6263 auto compareGEPOffset
=
6264 [&](const std::pair
<GetElementPtrInst
*, int64_t> &LHS
,
6265 const std::pair
<GetElementPtrInst
*, int64_t> &RHS
) {
6266 if (LHS
.first
== RHS
.first
)
6268 if (LHS
.second
!= RHS
.second
)
6269 return LHS
.second
< RHS
.second
;
6270 return LargeOffsetGEPID
[LHS
.first
] < LargeOffsetGEPID
[RHS
.first
];
    // Sorting all the GEPs of the same data structures based on the offsets.
    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
    LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
    // Skip if all the GEPs have the same offsets.
    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
      continue;
    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
    Value *NewBaseGEP = nullptr;

    auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
                             GetElementPtrInst *GEP) {
      LLVMContext &Ctx = GEP->getContext();
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());
      Type *I8PtrTy =
          PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());

      BasicBlock::iterator NewBaseInsertPt;
      BasicBlock *NewBaseInsertBB;
      if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
        // If the base of the struct is an instruction, the new base will be
        // inserted close to it.
        NewBaseInsertBB = BaseI->getParent();
        if (isa<PHINode>(BaseI))
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
          NewBaseInsertBB =
              SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
        } else
          NewBaseInsertPt = std::next(BaseI->getIterator());
      } else {
        // If the current base is an argument or global value, the new base
        // will be inserted to the entry block.
        NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
        NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
      }
      IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
      // Create a new base.
      Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
      NewBaseGEP = OldBase;
      if (NewBaseGEP->getType() != I8PtrTy)
        NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
      NewBaseGEP =
          NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
      NewGEPBases.insert(NewBaseGEP);
      return;
    };

    // Check whether all the offsets can be encoded with preferred common base.
    if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
            LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
      BaseOffset = PreferBase;
      // Create a new base if the offset of the BaseGEP can be decoded with one
      // instruction.
      createNewBase(BaseOffset, OldBase, BaseGEP);
    }

    auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
      GetElementPtrInst *GEP = LargeOffsetGEP->first;
      int64_t Offset = LargeOffsetGEP->second;
      if (Offset != BaseOffset) {
        TargetLowering::AddrMode AddrMode;
        AddrMode.HasBaseReg = true;
        AddrMode.BaseOffs = Offset - BaseOffset;
        // The result type of the GEP might not be the type of the memory
        // access.
        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
                                        GEP->getResultElementType(),
                                        GEP->getAddressSpace())) {
          // We need to create a new base if the offset to the current base is
          // too large to fit into the addressing mode. So, a very large struct
          // may be split into several parts.
          BaseGEP = GEP;
          BaseOffset = Offset;
          NewBaseGEP = nullptr;
        }
      }

      // Generate a new GEP to replace the current one.
      Type *PtrIdxTy = DL->getIndexType(GEP->getType());

      if (!NewBaseGEP) {
        // Create a new base if we don't have one yet. Find the insertion
        // pointer for the new base first.
        createNewBase(BaseOffset, OldBase, GEP);
      }

      IRBuilder<> Builder(GEP);
      Value *NewGEP = NewBaseGEP;
      if (Offset != BaseOffset) {
        // Calculate the new offset for the new GEP.
        Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
        NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
      }
      replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
      LargeOffsetGEPID.erase(GEP);
      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
      GEP->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}
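
// Illustrative sketch (added commentary, not from the original source): on a
// target whose addressing mode cannot encode the raw struct offsets, the
// rewrite above turns
//   %g1 = getelementptr i8, ptr %base, i64 100000
//   %g2 = getelementptr i8, ptr %base, i64 100004
// into a shared materialized base plus small offsets:
//   %splitgep = getelementptr i8, ptr %base, i64 100000
//   %g2.off   = getelementptr i8, ptr %splitgep, i64 4
// so only %splitgep stays live across the uses, and the remaining +4 can be
// folded into the memory instruction's immediate field.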
bool CodeGenPrepare::optimizePhiType(
    PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
    SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
  // We are looking for a collection of interconnected phi nodes that together
  // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
  // are of the same type. Convert the whole set of nodes to the type of the
  // bitcast.
  Type *PhiTy = I->getType();
  Type *ConvertTy = nullptr;
  if (Visited.count(I) ||
      (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
    return false;

  SmallVector<Instruction *, 4> Worklist;
  Worklist.push_back(cast<Instruction>(I));
  SmallPtrSet<PHINode *, 4> PhiNodes;
  SmallPtrSet<ConstantData *, 4> Constants;
  PhiNodes.insert(I);
  Visited.insert(I);
  SmallPtrSet<Instruction *, 4> Defs;
  SmallPtrSet<Instruction *, 4> Uses;
  // This works by adding extra bitcasts between load/stores and removing
  // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
  // we can get in the situation where we remove a bitcast in one iteration
  // just to add it again in the next. We need to ensure that at least one
  // bitcast we remove is anchored to something that will not change back.
  bool AnyAnchored = false;

  while (!Worklist.empty()) {
    Instruction *II = Worklist.pop_back_val();

    if (auto *Phi = dyn_cast<PHINode>(II)) {
      // Handle Defs, which might also be PHI's
      for (Value *V : Phi->incoming_values()) {
        if (auto *OpPhi = dyn_cast<PHINode>(V)) {
          if (!PhiNodes.count(OpPhi)) {
            if (!Visited.insert(OpPhi).second)
              return false;
            PhiNodes.insert(OpPhi);
            Worklist.push_back(OpPhi);
          }
        } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
          if (!OpLoad->isSimple())
            return false;
          if (Defs.insert(OpLoad).second)
            Worklist.push_back(OpLoad);
        } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
          if (Defs.insert(OpEx).second)
            Worklist.push_back(OpEx);
        } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
          if (!ConvertTy)
            ConvertTy = OpBC->getOperand(0)->getType();
          if (OpBC->getOperand(0)->getType() != ConvertTy)
            return false;
          if (Defs.insert(OpBC).second) {
            Worklist.push_back(OpBC);
            AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
                           !isa<ExtractElementInst>(OpBC->getOperand(0));
          }
        } else if (auto *OpC = dyn_cast<ConstantData>(V))
          Constants.insert(OpC);
        else
          return false;
      }
    }

    // Handle uses which might also be phi's
    for (User *V : II->users()) {
      if (auto *OpPhi = dyn_cast<PHINode>(V)) {
        if (!PhiNodes.count(OpPhi)) {
          if (Visited.count(OpPhi))
            return false;
          PhiNodes.insert(OpPhi);
          Visited.insert(OpPhi);
          Worklist.push_back(OpPhi);
        }
      } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
        if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
          return false;
        Uses.insert(OpStore);
      } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
        if (!ConvertTy)
          ConvertTy = OpBC->getType();
        if (OpBC->getType() != ConvertTy)
          return false;
        Uses.insert(OpBC);
        AnyAnchored |=
            any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
      } else
        return false;
    }
  }

  if (!ConvertTy || !AnyAnchored ||
      !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
    return false;

  LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
                    << *ConvertTy << "\n");

  // Create all the new phi nodes of the new type, and bitcast any loads to the
  // correct type.
  ValueToValueMap ValMap;
  for (ConstantData *C : Constants)
    ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
  for (Instruction *D : Defs) {
    if (isa<BitCastInst>(D)) {
      ValMap[D] = D->getOperand(0);
      DeletedInstrs.insert(D);
    } else {
      BasicBlock::iterator insertPt = std::next(D->getIterator());
      ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
    }
  }
  for (PHINode *Phi : PhiNodes)
    ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
                                  Phi->getName() + ".tc", Phi->getIterator());
  // Pipe together all the PhiNodes.
  for (PHINode *Phi : PhiNodes) {
    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
    for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
      NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
                          Phi->getIncomingBlock(i));
    Visited.insert(NewPhi);
  }
  // And finally pipe up the stores and bitcasts
  for (Instruction *U : Uses) {
    if (isa<BitCastInst>(U)) {
      DeletedInstrs.insert(U);
      replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
    } else {
      U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
                                       U->getIterator()));
    }
  }

  // Save the removed phis to be deleted later.
  for (PHINode *Phi : PhiNodes)
    DeletedInstrs.insert(Phi);
  return true;
}
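
// Illustrative sketch (added commentary, not from the original source): with
// shouldConvertPhiType(i32, float) returning true,
//   %ld  = load i32, ptr %p
//   %phi = phi i32 [ %ld, %entry ], [ %phi, %loop ]
//   %bc  = bitcast i32 %phi to float
// is rewritten so the phi web carries the value in the bitcast's type:
//   %ld.bc  = bitcast i32 %ld to float
//   %phi.tc = phi float [ %ld.bc, %entry ], [ %phi.tc, %loop ]
// and the now-dead intermediate bitcasts are queued in DeletedInstrs. The
// AnyAnchored flag above guarantees at least one removed bitcast is anchored
// to a non-load/non-extract value, so the pass cannot oscillate by re-adding
// the same cast on the next iteration.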
bool CodeGenPrepare::optimizePhiTypes(Function &F) {
  if (!OptimizePhiTypes)
    return false;

  bool Changed = false;
  SmallPtrSet<PHINode *, 4> Visited;
  SmallPtrSet<Instruction *, 4> DeletedInstrs;

  // Attempt to optimize all the phis in the function to the correct type.
  for (auto &BB : F)
    for (auto &Phi : BB.phis())
      Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);

  // Remove any old phi's that have been converted.
  for (auto *I : DeletedInstrs) {
    replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
    I->eraseFromParent();
  }

  return Changed;
}
/// Return true if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
    Instruction *&Inst, bool HasPromoted) {
  for (auto *MovedExtInst : MovedExts) {
    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
      Inst = MovedExtInst;
      break;
    }
  }
  if (!LI)
    return false;

  // If they're already in the same block, there's nothing to do.
  // Make the cheap checks first if we did not promote.
  // If we promoted, we need to check if it is indeed profitable.
  if (!HasPromoted && LI->getParent() == Inst->getParent())
    return false;

  return TLI->isExtLoad(LI, Inst, *DL);
}
/// Move a zext or sext fed by a load into the same basic block as the load,
/// unless conditions are unfavorable. This allows SelectionDAG to fold the
/// extend into the load.
///
/// E.g.,
/// \code
/// %ld = load i32* %addr
/// %add = add nuw i32 %ld, 4
/// %zext = zext i32 %add to i64
/// \endcode
/// =>
/// \code
/// %ld = load i32* %addr
/// %zext = zext i32 %ld to i64
/// %add = add nuw i64 %zext, 4
/// \endcode
/// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
/// allows us to match zext(load i32*) to i64.
///
/// Also, try to promote the computations used to obtain a sign extended
/// value used into memory accesses.
/// E.g.,
/// \code
/// a = add nsw i32 b, 3
/// d = sext i32 a to i64
/// e = getelementptr ..., i64 d
/// \endcode
/// =>
/// \code
/// f = sext i32 b to i64
/// a = add nsw i64 f, 3
/// e = getelementptr ..., i64 a
/// \endcode
///
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
  bool AllowPromotionWithoutCommonHeader = false;
  /// See if it is an interesting sext operation for the address type
  /// promotion before trying to promote it, e.g., the ones with the right
  /// type and used in memory accesses.
  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
      *Inst, AllowPromotionWithoutCommonHeader);
  TypePromotionTransaction TPT(RemovedInsts);
  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
      TPT.getRestorationPoint();
  SmallVector<Instruction *, 1> Exts;
  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
  Exts.push_back(Inst);

  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);

  // Look for a load being extended.
  LoadInst *LI = nullptr;
  Instruction *ExtFedByLoad;

  // Try to promote a chain of computation if it allows forming an extended
  // load.
  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
    TPT.commit();
    // Move the extend into the same block as the load.
    ExtFedByLoad->moveAfter(LI);
    ++NumExtsMoved;
    Inst = ExtFedByLoad;
    return true;
  }

  // Continue promoting SExts if known as considerable depending on targets.
  if (ATPConsiderable &&
      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
                                  HasPromoted, TPT, SpeculativelyMovedExts))
    return true;

  TPT.rollback(LastKnownGood);
  return false;
}
// Perform address type promotion if doing so is profitable.
// If AllowPromotionWithoutCommonHeader == false, we should find other sext
// instructions that sign extended the same initial value. However, if
// AllowPromotionWithoutCommonHeader == true, we expect promoting the
// extension is just profitable.
bool CodeGenPrepare::performAddressTypePromotion(
    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
    bool HasPromoted, TypePromotionTransaction &TPT,
    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
  bool Promoted = false;
  SmallPtrSet<Instruction *, 1> UnhandledExts;
  bool AllSeenFirst = true;
  for (auto *I : SpeculativelyMovedExts) {
    Value *HeadOfChain = I->getOperand(0);
    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
        SeenChainsForSExt.find(HeadOfChain);
    // If there is an unhandled SExt which has the same header, try to promote
    // it as well.
    if (AlreadySeen != SeenChainsForSExt.end()) {
      if (AlreadySeen->second != nullptr)
        UnhandledExts.insert(AlreadySeen->second);
      AllSeenFirst = false;
    }
  }

  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
                        SpeculativelyMovedExts.size() == 1)) {
    TPT.commit();
    if (HasPromoted)
      Promoted = true;
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = nullptr;
      ValToSExtendedUses[HeadOfChain].push_back(I);
    }
    // Update Inst as promotion happens.
    Inst = SpeculativelyMovedExts.pop_back_val();
  } else {
    // This is the first chain visited from the header, keep the current chain
    // as unhandled. Defer to promote this until we encounter another SExt
    // chain derived from the same header.
    for (auto *I : SpeculativelyMovedExts) {
      Value *HeadOfChain = I->getOperand(0);
      SeenChainsForSExt[HeadOfChain] = Inst;
    }
    return false;
  }

  if (!AllSeenFirst && !UnhandledExts.empty())
    for (auto *VisitedSExt : UnhandledExts) {
      if (RemovedInsts.count(VisitedSExt))
        continue;
      TypePromotionTransaction TPT(RemovedInsts);
      SmallVector<Instruction *, 1> Exts;
      SmallVector<Instruction *, 2> Chains;
      Exts.push_back(VisitedSExt);
      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
      TPT.commit();
      if (HasPromoted)
        Promoted = true;
      for (auto *I : Chains) {
        Value *HeadOfChain = I->getOperand(0);
        // Mark this as handled.
        SeenChainsForSExt[HeadOfChain] = nullptr;
        ValToSExtendedUses[HeadOfChain].push_back(I);
      }
    }
  return Promoted;
}
bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with result of extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (User *U : I->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB)
      continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (User *U : Src->users()) {
    Instruction *UI = cast<Instruction>(U);
    BasicBlock *UserBB = UI->getParent();
    if (UserBB == DefBB)
      continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block once.
  DenseMap<BasicBlock *, Instruction *> InsertedTruncs;

  bool MadeChange = false;
  for (Use &U : Src->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB)
      continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      assert(InsertPt != UserBB->end());
      InsertedTrunc = new TruncInst(I, Src->getType(), "");
      InsertedTrunc->insertBefore(*UserBB, InsertPt);
      InsertedInsts.insert(InsertedTrunc);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    U = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}
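
// Illustrative sketch (added commentary, not from the original source),
// assuming trunc i64 -> i32 is free on the target:
//
//   bb0:
//     %x = ...
//     %ext = zext i32 %x to i64
//   bb1:
//     use i32 %x
//
// becomes
//
//   bb0:
//     %x = ...
//     %ext = zext i32 %x to i64
//   bb1:
//     %tr = trunc i64 %ext to i32
//     use i32 %tr
//
// so only %ext, rather than both %x and %ext, is live out of bb0.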
// Find loads whose uses only use some of the loaded value's bits. Add an "and"
// just after the load if the target can fold this into one extload instruction,
// with the hope of eliminating some of the other later "and" instructions using
// the loaded value. "and"s that are made trivially redundant by the insertion
// of the new "and" are removed by this function, while others (e.g. those whose
// path from the load goes through a phi) are left for isel to potentially
// create an extload with.
//
// For example, with a value loaded in one block and masked through a phi in
// another:
//
// b0:
//   x1 = load i32
// b1:
//   x2 = load i32
// b2:
//   x = phi x1, x2
//   y = and x, 0xff
//
// becomes (after a call to optimizeLoadExt for each load):
//
// b0:
//   x1 = load i32
//   x1' = and x1, 0xff
// b1:
//   x2 = load i32
//   x2' = and x2, 0xff
// b2:
//   x = phi x1', x2'
//   y = and x, 0xff
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
  if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
    return false;

  // Skip loads we've already transformed.
  if (Load->hasOneUse() &&
      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
    return false;

  // Look at all uses of Load, looking through phis, to determine how many bits
  // of the loaded value are needed.
  SmallVector<Instruction *, 8> WorkList;
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 8> AndsToMaybeRemove;
  for (auto *U : Load->users())
    WorkList.push_back(cast<Instruction>(U));

  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
  unsigned BitWidth = LoadResultVT.getSizeInBits();
  // If the BitWidth is 0, do not try to optimize the type
  if (BitWidth == 0)
    return false;

  APInt DemandBits(BitWidth, 0);
  APInt WidestAndBits(BitWidth, 0);

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();

    // Break use-def graph loops.
    if (!Visited.insert(I).second)
      continue;

    // For a PHI node, push all of its users.
    if (auto *Phi = dyn_cast<PHINode>(I)) {
      for (auto *U : Phi->users())
        WorkList.push_back(cast<Instruction>(U));
      continue;
    }

    switch (I->getOpcode()) {
    case Instruction::And: {
      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!AndC)
        return false;
      APInt AndBits = AndC->getValue();
      DemandBits |= AndBits;
      // Keep track of the widest and mask we see.
      if (AndBits.ugt(WidestAndBits))
        WidestAndBits = AndBits;
      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
        AndsToMaybeRemove.push_back(I);
      break;
    }

    case Instruction::Shl: {
      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
      if (!ShlC)
        return false;
      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
      DemandBits.setLowBits(BitWidth - ShiftAmt);
      break;
    }

    case Instruction::Trunc: {
      EVT TruncVT = TLI->getValueType(*DL, I->getType());
      unsigned TruncBitWidth = TruncVT.getSizeInBits();
      DemandBits.setLowBits(TruncBitWidth);
      break;
    }

    default:
      return false;
    }
  }

  uint32_t ActiveBits = DemandBits.getActiveBits();
  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
  // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
  // followed by an AND.
  // TODO: Look into removing this restriction by fixing backends to either
  // return false for isLoadExtLegal for i1 or have them select this pattern to
  // a single instruction.
  //
  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
  // mask, since these are the only ands that will be removed by isel.
  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
      WidestAndBits != DemandBits)
    return false;

  LLVMContext &Ctx = Load->getType()->getContext();
  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
  EVT TruncVT = TLI->getValueType(*DL, TruncTy);

  // Reject cases that won't be matched as extloads.
  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
    return false;

  IRBuilder<> Builder(Load->getNextNonDebugInstruction());
  auto *NewAnd = cast<Instruction>(
      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
  // Mark this instruction as "inserted by CGP", so that other
  // optimizations don't touch it.
  InsertedInsts.insert(NewAnd);

  // Replace all uses of load with new and (except for the use of load in the
  // new and itself).
  replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
  NewAnd->setOperand(0, Load);

  // Remove any and instructions that are now redundant.
  for (auto *And : AndsToMaybeRemove)
    // Check that the and mask is the same as the one we decided to put on the
    // new and.
    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
      replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
      if (&*CurInstIterator == And)
        CurInstIterator = std::next(And->getIterator());
      And->eraseFromParent();
      ++NumAndUses;
    }

  ++NumAndsAdded;
  return true;
}
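
// Worked example for the demanded-bits computation above (added commentary,
// not in the original source):
//   %x = load i32
//   %a = and i32 %x, 255   ; DemandBits |= 0xff, WidestAndBits = 0xff
//   %s = shl i32 %x, 24    ; DemandBits |= low (32 - 24) = 8 bits
// DemandBits ends up 0xff, a mask with ActiveBits == 8 that equals
// WidestAndBits, so "and i32 %x, 255" is inserted right after the load, and
// the original and, carrying the exact same mask, is deleted as redundant.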
/// Check if V (an operand of a select instruction) is an expensive instruction
/// that is only used once.
static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  // If it's safe to speculatively execute, then it should not have side
  // effects; therefore, it's safe to sink and possibly *not* execute.
  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
         TTI->isExpensiveToSpeculativelyExecute(I);
}
/// Returns true if a SelectInst should be turned into an explicit branch.
static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
                                                const TargetLowering *TLI,
                                                SelectInst *SI) {
  // If even a predictable select is cheap, then a branch can't be cheaper.
  if (!TLI->isPredictableSelectExpensive())
    return false;

  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.

  // If metadata tells us that the select condition is obviously predictable,
  // then we want to replace the select with a branch.
  uint64_t TrueWeight, FalseWeight;
  if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
    uint64_t Max = std::max(TrueWeight, FalseWeight);
    uint64_t Sum = TrueWeight + FalseWeight;
    if (Sum != 0) {
      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
      if (Probability > TTI->getPredictableBranchThreshold())
        return true;
    }
  }

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
  // comparison condition. If the compare has more than one use, there's
  // probably another cmov or setcc around, so it's not worth emitting a branch.
  if (!Cmp || !Cmp->hasOneUse())
    return false;

  // If either operand of the select is expensive and only needed on one side
  // of the select, we should form a branch.
  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
      sinkSelectOperand(TTI, SI->getFalseValue()))
    return true;

  return false;
}
/// If \p isTrue is true, return the true value of \p SI, otherwise return
/// false value of \p SI. If the true/false value of \p SI is defined by any
/// select instructions in \p Selects, look through the defining select
/// instruction until the true/false value is not defined in \p Selects.
static Value *
getTrueOrFalseValue(SelectInst *SI, bool isTrue,
                    const SmallPtrSet<const Instruction *, 2> &Selects) {
  Value *V = nullptr;

  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
       DefSI = dyn_cast<SelectInst>(V)) {
    assert(DefSI->getCondition() == SI->getCondition() &&
           "The condition of DefSI does not match with SI");
    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
  }

  assert(V && "Failed to get select true/false value");
  return V;
}
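
// Illustrative example (added commentary, not from the original source): for
//   %s1 = select i1 %c, i32 %a, i32 %b
//   %s2 = select i1 %c, i32 %s1, i32 %d
// with both selects in Selects, getTrueOrFalseValue(%s2, true, Selects) looks
// through %s1 and returns %a, while the false value of %s2 is simply %d.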
bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
  assert(Shift->isShift() && "Expected a shift");

  // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
  // general vector shifts, and (3) the shift amount is a select-of-splatted
  // values, hoist the shifts before the select:
  //   shift Op0, (select Cond, TVal, FVal) -->
  //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
  //
  // This is inverting a generic IR transform when we know that the cost of a
  // general vector shift is more than the cost of 2 shift-by-scalars.
  // We can't do this effectively in SDAG because we may not be able to
  // determine if the select operands are splats from within a basic block.
  Type *Ty = Shift->getType();
  if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
    return false;
  Value *Cond, *TVal, *FVal;
  if (!match(Shift->getOperand(1),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Shift);
  BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
  Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
  Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
  Shift->eraseFromParent();
  return true;
}
bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
  Intrinsic::ID Opcode = Fsh->getIntrinsicID();
  assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
         "Expected a funnel shift");

  // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
  // than general vector shifts, and (3) the shift amount is select-of-splatted
  // values, hoist the funnel shifts before the select:
  //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
  //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
  //
  // This is inverting a generic IR transform when we know that the cost of a
  // general vector shift is more than the cost of 2 shift-by-scalars.
  // We can't do this effectively in SDAG because we may not be able to
  // determine if the select operands are splats from within a basic block.
  Type *Ty = Fsh->getType();
  if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
    return false;
  Value *Cond, *TVal, *FVal;
  if (!match(Fsh->getOperand(2),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return false;
  if (!isSplatValue(TVal) || !isSplatValue(FVal))
    return false;

  IRBuilder<> Builder(Fsh);
  Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
  Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
  Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
  replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
  Fsh->eraseFromParent();
  return true;
}
/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
  if (DisableSelectToBranch)
    return false;

  // If the SelectOptimize pass is enabled, selects have already been optimized.
  if (!getCGPassBuilderOption().DisableSelectOptimize)
    return false;

  // Find all consecutive select instructions that share the same condition.
  SmallVector<SelectInst *, 2> ASI;
  ASI.push_back(SI);
  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
    SelectInst *I = dyn_cast<SelectInst>(&*It);
    if (I && SI->getCondition() == I->getCondition()) {
      ASI.push_back(I);
    } else {
      break;
    }
  }

  SelectInst *LastSI = ASI.back();
  // Increment the current iterator to skip all the rest of select instructions
  // because they will be either "not lowered" or "all lowered" to branch.
  CurInstIterator = std::next(LastSI->getIterator());
  // Examine debug-info attached to the consecutive select instructions. They
  // won't be individually optimised by optimizeInst, so we need to perform
  // DbgVariableRecord maintenance here instead.
  for (SelectInst *SI : ArrayRef(ASI).drop_front())
    fixupDbgVariableRecordsOnInst(*SI);

  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to CF?
  if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  if (TLI->isSelectSupported(SelectKind) &&
      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
    return false;

  // The DominatorTree needs to be rebuilt by any consumers after this
  // transformation. We simply reset here rather than setting the ModifiedDT
  // flag to avoid restarting the function walk in runOnFunction for each
  // select optimized.
  DT.reset();

  // Transform a sequence like this:
  //    this:
  //       %cmp = cmp uge i32 %a, %b
  //       %sel = select i1 %cmp, i32 %c, i32 %d
  //
  // Into:
  //    this:
  //       %cmp = cmp uge i32 %a, %b
  //       %cmp.frozen = freeze %cmp
  //       br i1 %cmp.frozen, label %select.true, label %select.false
  //    select.true:
  //       br label %select.end
  //    select.false:
  //       br label %select.end
  //    select.end:
  //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
  //
  // %cmp should be frozen, otherwise it may introduce undefined behavior.
  // In addition, we may sink instructions that produce %c or %d from
  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunken instruction, that
  // block and its branch may be optimized away. In that case, one side of the
  // first branch will point directly to select.end, and the corresponding PHI
  // predecessor block will be the start block.

  // Collect values that go on the true side and the values that go on the false
  // side.
  SmallVector<Instruction *> TrueInstrs, FalseInstrs;
  for (SelectInst *SI : ASI) {
    if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
      TrueInstrs.push_back(cast<Instruction>(V));
    if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
      FalseInstrs.push_back(cast<Instruction>(V));
  }

  // Split the select block, according to how many (if any) values go on each
  // side.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
  // We should split before any debug-info.
  SplitPt.setHeadBit(true);

  IRBuilder<> IB(SI);
  auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");

  BasicBlock *TrueBlock = nullptr;
  BasicBlock *FalseBlock = nullptr;
  BasicBlock *EndBlock = nullptr;
  BranchInst *TrueBranch = nullptr;
  BranchInst *FalseBranch = nullptr;
  if (TrueInstrs.size() == 0) {
    FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
  } else if (FalseInstrs.size() == 0) {
    TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
        CondFr, SplitPt, false, nullptr, nullptr, LI));
    TrueBlock = TrueBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  } else {
    Instruction *ThenTerm = nullptr;
    Instruction *ElseTerm = nullptr;
    SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
                                  nullptr, nullptr, LI);
    TrueBranch = cast<BranchInst>(ThenTerm);
    FalseBranch = cast<BranchInst>(ElseTerm);
    TrueBlock = TrueBranch->getParent();
    FalseBlock = FalseBranch->getParent();
    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
  }

  EndBlock->setName("select.end");
  if (TrueBlock)
    TrueBlock->setName("select.true.sink");
  if (FalseBlock)
    FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
                                                : "select.false.sink");

  if (IsHugeFunc) {
    if (TrueBlock)
      FreshBBs.insert(TrueBlock);
    if (FalseBlock)
      FreshBBs.insert(FalseBlock);
    FreshBBs.insert(EndBlock);
  }

  BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));

  static const unsigned MD[] = {
      LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
      LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
  StartBlock->getTerminator()->copyMetadata(*SI, MD);

  // Sink expensive instructions into the conditional blocks to avoid executing
  // them speculatively.
  for (Instruction *I : TrueInstrs)
    I->moveBefore(TrueBranch);
  for (Instruction *I : FalseInstrs)
    I->moveBefore(FalseBranch);

  // If we did not create a new block for one of the 'true' or 'false' paths
  // of the condition, it means that side of the branch goes to the end block
  // directly and the path originates from the start block from the point of
  // view of the new PHI.
  if (TrueBlock == nullptr)
    TrueBlock = StartBlock;
  else if (FalseBlock == nullptr)
    FalseBlock = StartBlock;

  SmallPtrSet<const Instruction *, 2> INS;
  INS.insert(ASI.begin(), ASI.end());
  // Use reverse iterator because later select may use the value of the
  // earlier select, and we need to propagate value through earlier select
  // to get the PHI operand.
  for (SelectInst *SI : llvm::reverse(ASI)) {
    // The select itself is replaced with a PHI Node.
    PHINode *PN = PHINode::Create(SI->getType(), 2, "");
    PN->insertBefore(EndBlock->begin());
    PN->takeName(SI);
    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
    PN->setDebugLoc(SI->getDebugLoc());

    replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
    SI->eraseFromParent();
    INS.erase(SI);
    ++NumSelectsExpanded;
  }

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  return true;
}
/// Some targets only accept certain types for splat inputs. For example a VDUP
/// in MVE takes a GPR (integer) register, and instructions that incorporate
/// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
  // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
  if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                            m_Undef(), m_ZeroMask())))
    return false;
  Type *NewType = TLI->shouldConvertSplatType(SVI);
  if (!NewType)
    return false;

  auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
  assert(!NewType->isVectorTy() && "Expected a scalar type!");
  assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
         "Expected a type of the same size!");
  auto *NewVecType =
      FixedVectorType::get(NewType, SVIVecType->getNumElements());

  // Create a bitcast (shuffle (insert (bitcast(..))))
  IRBuilder<> Builder(SVI->getContext());
  Builder.SetInsertPoint(SVI);
  Value *BC1 = Builder.CreateBitCast(
      cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
  Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
  Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);

  replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      SVI, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });

  // Also hoist the bitcast up to its operand if they are not in the same
  // block.
  if (auto *BCI = dyn_cast<Instruction>(BC1))
    if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
      if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
          !Op->isTerminator() && !Op->isEHPad())
        BCI->moveAfter(Op);

  return true;
}
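
// Illustrative sketch (added commentary, not from the original source): if
// shouldConvertSplatType returns i32 for a <4 x float> splat, then
//   %i = insertelement <4 x float> poison, float %f, i64 0
//   %s = shufflevector <4 x float> %i, <4 x float> poison, zeroinitializer
// becomes
//   %b  = bitcast float %f to i32
//   %sp = <4 x i32> splat of %b        ; via insertelement + shufflevector
//   %s2 = bitcast <4 x i32> %sp to <4 x float>
// which lets e.g. an MVE VDUP take its input from a GPR.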
bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
  // If the operands of I can be folded into a target instruction together with
  // I, duplicate and sink them.
  SmallVector<Use *, 4> OpsToSink;
  if (!TLI->shouldSinkOperands(I, OpsToSink))
    return false;

  // OpsToSink can contain multiple uses in a use chain (e.g.
  // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
  // uses must come first, so we process the ops in reverse order so as to not
  // create invalid IR.
  BasicBlock *TargetBB = I->getParent();
  bool Changed = false;
  SmallVector<Use *, 4> ToReplace;
  Instruction *InsertPoint = I;
  DenseMap<const Instruction *, unsigned long> InstOrdering;
  unsigned long InstNumber = 0;
  for (const auto &I : *TargetBB)
    InstOrdering[&I] = InstNumber++;

  for (Use *U : reverse(OpsToSink)) {
    auto *UI = cast<Instruction>(U->get());
    if (isa<PHINode>(UI))
      continue;
    if (UI->getParent() == TargetBB) {
      if (InstOrdering[UI] < InstOrdering[InsertPoint])
        InsertPoint = UI;
      continue;
    }
    ToReplace.push_back(U);
  }

  SetVector<Instruction *> MaybeDead;
  DenseMap<Instruction *, Instruction *> NewInstructions;
  for (Use *U : ToReplace) {
    auto *UI = cast<Instruction>(U->get());
    Instruction *NI = UI->clone();

    if (IsHugeFunc) {
      // Now we clone an instruction, its operands' defs may sink to this BB
      // now. So we put the operands defs' BBs into FreshBBs to do optimization.
      for (unsigned I = 0; I < NI->getNumOperands(); ++I) {
        auto *OpDef = dyn_cast<Instruction>(NI->getOperand(I));
        if (!OpDef)
          continue;
        FreshBBs.insert(OpDef->getParent());
      }
    }

    NewInstructions[UI] = NI;
    MaybeDead.insert(UI);
    LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
    NI->insertBefore(InsertPoint);
    InsertPoint = NI;
    InsertedInsts.insert(NI);

    // Update the use for the new instruction, making sure that we update the
    // sunk instruction uses, if it is part of a chain that has already been
    // sunk.
    Instruction *OldI = cast<Instruction>(U->getUser());
    if (NewInstructions.count(OldI))
      NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
    else
      U->set(NI);
    Changed = true;
  }

  // Remove instructions that are dead after sinking.
  for (auto *I : MaybeDead) {
    if (!I->hasNUsesOrMore(1)) {
      LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
      I->eraseFromParent();
    }
  }

  return Changed;
}
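
// Illustrative example (added commentary, not from the original source): if
// the target reports the extended operand of a widening multiply as sinkable
// (e.g. for AArch64 smull-style patterns), then with
//   bb0: %e = sext <4 x i16> %v to <4 x i32>
//   bb1: %m = mul <4 x i32> %e, %w
// the %e is cloned into bb1 right before %m, so instruction selection can fold
// the extend into the multiply; the original %e is erased if it has no other
// uses left.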
bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
  Value *Cond = SI->getCondition();
  Type *OldType = Cond->getType();
  LLVMContext &Context = Cond->getContext();
  EVT OldVT = TLI->getValueType(*DL, OldType);
  MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
  unsigned RegWidth = RegType.getSizeInBits();

  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
    return false;

  // If the register width is greater than the type width, expand the condition
  // of the switch instruction and each case constant to the width of the
  // register. By widening the type of the switch condition, subsequent
  // comparisons (for case comparisons) will not need to be extended to the
  // preferred register width, so we will potentially eliminate N-1 extends,
  // where N is the number of cases in the switch.
  auto *NewType = Type::getIntNTy(Context, RegWidth);

  // Extend the switch condition and case constants using the target preferred
  // extend unless the switch condition is a function argument with an extend
  // attribute. In that case, we can avoid an unnecessary mask/extension by
  // matching the argument extension instead.
  Instruction::CastOps ExtType = Instruction::ZExt;
  // Some targets prefer SExt over ZExt.
  if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
    ExtType = Instruction::SExt;

  if (auto *Arg = dyn_cast<Argument>(Cond)) {
    if (Arg->hasSExtAttr())
      ExtType = Instruction::SExt;
    if (Arg->hasZExtAttr())
      ExtType = Instruction::ZExt;
  }

  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
  ExtInst->insertBefore(SI);
  ExtInst->setDebugLoc(SI->getDebugLoc());
  SI->setCondition(ExtInst);
  for (auto Case : SI->cases()) {
    const APInt &NarrowConst = Case.getCaseValue()->getValue();
    APInt WideConst = (ExtType == Instruction::ZExt)
                          ? NarrowConst.zext(RegWidth)
                          : NarrowConst.sext(RegWidth);
    Case.setValue(ConstantInt::get(Context, WideConst));
  }

  return true;
}
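
// Illustrative example (added commentary, not from the original source): on a
// target whose preferred switch condition type is i64, an i8 switch
//   switch i8 %c, label %def [ i8 1, label %a
//                              i8 2, label %b ]
// is rewritten as
//   %ext = zext i8 %c to i64
//   switch i64 %ext, label %def [ i64 1, label %a
//                                 i64 2, label %b ]
// so the per-case comparisons happen at register width without N-1 extends
// (sext is used instead when the target or argument attributes prefer it).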
bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
  // The SCCP optimization tends to produce code like this:
  //   switch(x) { case 42: phi(42, ...) }
  // Materializing the constant for the phi-argument needs instructions, so we
  // change the code to:
  //   switch(x) { case 42: phi(x, ...) }

  Value *Condition = SI->getCondition();
  // Avoid endless loop in degenerate case.
  if (isa<ConstantInt>(*Condition))
    return false;

  bool Changed = false;
  BasicBlock *SwitchBB = SI->getParent();
  Type *ConditionType = Condition->getType();

  for (const SwitchInst::CaseHandle &Case : SI->cases()) {
    ConstantInt *CaseValue = Case.getCaseValue();
    BasicBlock *CaseBB = Case.getCaseSuccessor();
    // Set to true if we previously checked that `CaseBB` is only reached by
    // a single case from this switch.
    bool CheckedForSinglePred = false;
    for (PHINode &PHI : CaseBB->phis()) {
      Type *PHIType = PHI.getType();
      // If ZExt is free then we can also catch patterns like this:
      //   switch((i32)x) { case 42: phi((i64)42, ...); }
      // and replace `(i64)42` with `zext i32 %x to i64`.
      bool TryZExt =
          PHIType->isIntegerTy() &&
          PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
          TLI->isZExtFree(ConditionType, PHIType);
      if (PHIType == ConditionType || TryZExt) {
        // Set to true to skip this case because of multiple preds.
        bool SkipCase = false;
        Value *Replacement = nullptr;
        for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
          Value *PHIValue = PHI.getIncomingValue(I);
          if (PHIValue != CaseValue) {
            if (!TryZExt)
              continue;
            ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
            if (!PHIValueInt ||
                PHIValueInt->getValue() !=
                    CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
              continue;
          }
          if (PHI.getIncomingBlock(I) != SwitchBB)
            continue;
          // We cannot optimize if there are multiple case labels jumping to
          // this block. This check may get expensive when there are many
          // case labels so we test for it last.
          if (!CheckedForSinglePred) {
            CheckedForSinglePred = true;
            if (SI->findCaseDest(CaseBB) == nullptr) {
              SkipCase = true;
              break;
            }
          }

          if (Replacement == nullptr) {
            if (PHIValue == CaseValue) {
              Replacement = Condition;
            } else {
              IRBuilder<> Builder(SI);
              Replacement = Builder.CreateZExt(Condition, PHIType);
            }
          }
          PHI.setIncomingValue(I, Replacement);
          Changed = true;
        }
        if (SkipCase)
          break;
      }
    }
  }
  return Changed;
}
bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
  bool Changed = optimizeSwitchType(SI);
  Changed |= optimizeSwitchPhiConstants(SI);
  return Changed;
}
namespace {

/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
/// E.g.,
/// a = vector_op <2 x i32>
/// b = extractelement <2 x i32> a, i32 0
/// c = scalar_op b
/// store c
///
/// =>
/// a = vector_op <2 x i32>
/// c = vector_op a (equivalent to scalar_op on the related lane)
/// * d = extractelement <2 x i32> c, i32 0
/// * store d
/// Assuming both extractelement and store can be combined, we get rid of the
/// transition.
class VectorPromoteHelper {
  /// DataLayout associated with the current module.
  const DataLayout &DL;

  /// Used to perform some checks on the legality of vector operations.
  const TargetLowering &TLI;

  /// Used to estimate the cost of the promoted chain.
  const TargetTransformInfo &TTI;

  /// The transition being moved downwards.
  Instruction *Transition;

  /// The sequence of instructions to be promoted.
  SmallVector<Instruction *, 4> InstsToBePromoted;

  /// Cost of combining a store and an extract.
  unsigned StoreExtractCombineCost;

  /// Instruction that will be combined with the transition.
  Instruction *CombineInst = nullptr;

  /// The instruction that represents the current end of the transition.
  /// Since we are faking the promotion until we reach the end of the chain
  /// of computation, we need a way to get the current end of the transition.
  Instruction *getEndOfTransition() const {
    if (InstsToBePromoted.empty())
      return Transition;
    return InstsToBePromoted.back();
  }

  /// Return the index of the original value in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
  /// c, is at index 0.
  unsigned getTransitionOriginalValueIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 0;
  }

  /// Return the index of the index in the transition.
  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
  /// is at index 1.
  unsigned getTransitionIdx() const {
    assert(isa<ExtractElementInst>(Transition) &&
           "Other kind of transitions are not supported yet");
    return 1;
  }

  /// Get the type of the transition.
  /// This is the type of the original value.
  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
  /// transition is <2 x i32>.
  Type *getTransitionType() const {
    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
  }

  /// Promote \p ToBePromoted by moving \p Def downward through.
  /// I.e., we have the following sequence:
  /// Def = Transition <ty1> a to <ty2>
  /// b = ToBePromoted <ty2> Def, ...
  /// =>
  /// b = ToBePromoted <ty1> a, ...
  /// Def = Transition <ty1> ToBePromoted to <ty2>
  void promoteImpl(Instruction *ToBePromoted);

  /// Check whether or not it is profitable to promote all the
  /// instructions enqueued to be promoted.
  bool isProfitableToPromote() {
    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
    unsigned Index = isa<ConstantInt>(ValIdx)
                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
                         : -1;
    Type *PromotedType = getTransitionType();

    StoreInst *ST = cast<StoreInst>(CombineInst);
    unsigned AS = ST->getPointerAddressSpace();
    // Check if this store is supported.
    if (!TLI.allowsMisalignedMemoryAccesses(
            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
            ST->getAlign())) {
      // If this is not supported, there is no way we can combine
      // the extract with the store.
      return false;
    }

    // The scalar chain of computation has to pay for the transition
    // scalar to vector.
    // The vector chain has to account for the combining cost.
    enum TargetTransformInfo::TargetCostKind CostKind =
        TargetTransformInfo::TCK_RecipThroughput;
    InstructionCost ScalarCost =
        TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
    InstructionCost VectorCost = StoreExtractCombineCost;
    for (const auto &Inst : InstsToBePromoted) {
      // Compute the cost.
      // By construction, all instructions being promoted are arithmetic ones.
      // Moreover, one argument is a constant that can be viewed as a splat
      // constant.
      Value *Arg0 = Inst->getOperand(0);
      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
                            isa<ConstantFP>(Arg0);
      TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
      if (IsArg0Constant)
        Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
      else
        Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;

      ScalarCost += TTI.getArithmeticInstrCost(
          Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
                                               CostKind, Arg0Info, Arg1Info);
    }
    LLVM_DEBUG(
        dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
               << ScalarCost << "\nVector: " << VectorCost << '\n');
    return ScalarCost > VectorCost;
  }

  /// Generate a constant vector with \p Val with the same
  /// number of elements as the transition.
  /// \p UseSplat defines whether or not \p Val should be replicated
  /// across the whole vector.
  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
  /// otherwise we generate a vector with as many undef as possible:
  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
  /// used at the index of the extract.
  Value *getConstantVector(Constant *Val, bool UseSplat) const {
    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
    if (!UseSplat) {
      // If we cannot determine where the constant must be, we have to
      // use a splat constant.
      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
        ExtractIdx = CstVal->getSExtValue();
      else
        UseSplat = true;
    }

    ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
    if (UseSplat)
      return ConstantVector::getSplat(EC, Val);

    if (!EC.isScalable()) {
      SmallVector<Constant *, 4> ConstVec;
      UndefValue *UndefVal = UndefValue::get(Val->getType());
      for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
        if (Idx == ExtractIdx)
          ConstVec.push_back(Val);
        else
          ConstVec.push_back(UndefVal);
      }
      return ConstantVector::get(ConstVec);
    } else
      llvm_unreachable(
          "Generate scalable vector for non-splat is unimplemented");
  }

  /// Check if promoting to a vector type an operand at \p OperandIdx
  /// in \p Use can trigger undefined behavior.
  static bool canCauseUndefinedBehavior(const Instruction *Use,
                                        unsigned OperandIdx) {
    // This is not safe to introduce undef when the operand is on
    // the right hand side of a division-like instruction.
    if (OperandIdx != 1)
      return false;
    switch (Use->getOpcode()) {
    default:
      return false;
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::SRem:
    case Instruction::URem:
      return true;
    case Instruction::FDiv:
    case Instruction::FRem:
      return !Use->hasNoNaNs();
    }
    llvm_unreachable(nullptr);
  }

public:
  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
                      const TargetTransformInfo &TTI, Instruction *Transition,
                      unsigned CombineCost)
      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
        StoreExtractCombineCost(CombineCost) {
    assert(Transition && "Do not know how to promote null");
  }

  /// Check if we can promote \p ToBePromoted to \p Type.
  bool canPromote(const Instruction *ToBePromoted) const {
    // We could support CastInst too.
    return isa<BinaryOperator>(ToBePromoted);
  }

  /// Check if it is profitable to promote \p ToBePromoted
  /// by moving downward the transition through.
  bool shouldPromote(const Instruction *ToBePromoted) const {
    // Promote only if all the operands can be statically expanded.
    // Indeed, we do not want to introduce any new kind of transitions.
    for (const Use &U : ToBePromoted->operands()) {
      const Value *Val = U.get();
      if (Val == getEndOfTransition()) {
        // If the use is a division and the transition is on the rhs,
        // we cannot promote the operation, otherwise we may create a
        // division by zero.
        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
          return false;
        continue;
      }
      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
          !isa<ConstantFP>(Val))
        return false;
    }
    // Check that the resulting operation is legal.
    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
    if (!ISDOpcode)
      return false;
    return StressStoreExtract ||
           TLI.isOperationLegalOrCustom(
               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
  }

  /// Check whether or not \p Use can be combined
  /// with the transition.
  /// I.e., is it possible to do Use(Transition) => AnotherUse?
  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }

  /// Record \p ToBePromoted as part of the chain to be promoted.
  void enqueueForPromotion(Instruction *ToBePromoted) {
    InstsToBePromoted.push_back(ToBePromoted);
  }

  /// Set the instruction that will be combined with the transition.
  void recordCombineInstruction(Instruction *ToBeCombined) {
    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
    CombineInst = ToBeCombined;
  }

  /// Promote all the instructions enqueued for promotion if it is
  /// profitable.
  /// \return True if the promotion happened, false otherwise.
  bool promote() {
    // Check if there is something to promote.
    // Right now, if we do not have anything to combine with,
    // we assume the promotion is not profitable.
    if (InstsToBePromoted.empty() || !CombineInst)
      return false;

    // Check cost.
    if (!StressStoreExtract && !isProfitableToPromote())
      return false;

    // Promote.
    for (auto &ToBePromoted : InstsToBePromoted)
      promoteImpl(ToBePromoted);
    InstsToBePromoted.clear();
    return true;
  }
};

} // end anonymous namespace
void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
  // At this point, we know that all the operands of ToBePromoted but Def
  // can be statically promoted.
  // For Def, we need to use its parameter in ToBePromoted:
  // b = ToBePromoted ty1 a
  // Def = Transition ty1 b to ty2
  // Move the transition down.
  // 1. Replace all uses of the promoted operation by the transition.
  // = ... b => = ... Def.
  assert(ToBePromoted->getType() == Transition->getType() &&
         "The type of the result of the transition does not match "
         "the final type");
  ToBePromoted->replaceAllUsesWith(Transition);
  // 2. Update the type of the uses.
  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
  Type *TransitionTy = getTransitionType();
  ToBePromoted->mutateType(TransitionTy);
  // 3. Update all the operands of the promoted operation with promoted
  // operands.
  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
  for (Use &U : ToBePromoted->operands()) {
    Value *Val = U.get();
    Value *NewVal = nullptr;
    if (Val == Transition)
      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
             isa<ConstantFP>(Val)) {
      // Use a splat constant if it is not safe to use undef.
      NewVal = getConstantVector(
          cast<Constant>(Val),
          isa<UndefValue>(Val) ||
              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
    } else
      llvm_unreachable("Did you modify shouldPromote and forget to update "
                       "this?");
    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
  }
  Transition->moveAfter(ToBePromoted);
  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
}
/// Some targets can do store(extractelement) with one instruction.
/// Try to push the extractelement towards the stores when the target
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
  unsigned CombineCost = std::numeric_limits<unsigned>::max();
  if (DisableStoreExtract ||
      (!StressStoreExtract &&
       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                       Inst->getOperand(1), CombineCost)))
    return false;

  // At this point we know that Inst is a vector to scalar transition.
  // Try to move it down the def-use chain, until:
  // - We can combine the transition with its single use
  //   => we got rid of the transition.
  // - We escape the current basic block
  //   => we would need to check that we are moving it at a cheaper place and
  //      we do not do that for now.
  BasicBlock *Parent = Inst->getParent();
  LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
  // If the transition has more than one use, assume this is not going to be
  // beneficial.
  while (Inst->hasOneUse()) {
    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
    LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');

    if (ToBePromoted->getParent() != Parent) {
      LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
                        << ToBePromoted->getParent()->getName()
                        << ") than the transition (" << Parent->getName()
                        << ").\n");
      return false;
    }

    if (VPH.canCombine(ToBePromoted)) {
      LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
                        << "will be combined with: " << *ToBePromoted << '\n');
      VPH.recordCombineInstruction(ToBePromoted);
      bool Changed = VPH.promote();
      NumStoreExtractExposed += Changed;
      return Changed;
    }

    LLVM_DEBUG(dbgs() << "Try promoting.\n");
    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
      return false;

    LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");

    VPH.enqueueForPromotion(ToBePromoted);
    Inst = ToBePromoted;
  }
  return false;
}
7934 /// For the instruction sequence of store below, F and I values
7935 /// are bundled together as an i64 value before being stored into memory.
7936 /// Sometimes it is more efficient to generate separate stores for F and I,
7937 /// which can remove the bitwise instructions or sink them to colder places.
7939 /// (store (or (zext (bitcast F to i32) to i64),
7940 /// (shl (zext I to i64), 32)), addr) -->
7941 /// (store F, addr) and (store I, addr+4)
7943 /// Similarly, splitting for other merged store can also be beneficial, like:
7944 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7945 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7946 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7947 /// For pair of {i16, i8}, i32 store --> two i16 stores.
7948 /// For pair of {i8, i8}, i16 store --> two i8 stores.
7950 /// We allow each target to determine specifically which kind of splitting is
7953 /// The store patterns are commonly seen from the simple code snippet below
7954 /// if only std::make_pair(...) is sroa transformed before inlined into hoo.
7955 /// void goo(const std::pair<int, float> &);
7958 /// goo(std::make_pair(tmp, ftmp));
7962 /// Although we already have similar splitting in DAG Combine, we duplicate
7963 /// it in CodeGenPrepare to catch the case in which pattern is across
7964 /// multiple BBs. The logic in DAG Combine is kept to catch case generated
7965 /// during code expansion.
static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
                                const TargetLowering &TLI) {
  // Handle simple but common cases only.
  Type *StoreType = SI.getValueOperand()->getType();

  // The code below assumes shifting a value by <number of bits>,
  // whereas scalable vectors would have to be shifted by
  // <2log(vscale) + number of bits> in order to store the
  // low/high parts. Bailing out for now.
  if (StoreType->isScalableTy())
    return false;

  if (!DL.typeSizeEqualsStoreSize(StoreType) ||
      DL.getTypeSizeInBits(StoreType) == 0)
    return false;

  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
  if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
    return false;

  // Don't split the store if it is volatile.
  if (SI.isVolatile())
    return false;

  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  // or the commuted form
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
  Value *LValue, *HValue;
  if (!match(SI.getValueOperand(),
             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
                                   m_SpecificInt(HalfValBitSize))))))
    return false;

  // Check that LValue and HValue are integers no wider than HalfValBitSize.
  if (!LValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
      !HValue->getType()->isIntegerTy() ||
      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
    return false;

  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
  // as the input of target query.
  auto *LBC = dyn_cast<BitCastInst>(LValue);
  auto *HBC = dyn_cast<BitCastInst>(HValue);
  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
                  : EVT::getEVT(LValue->getType());
  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
                   : EVT::getEVT(HValue->getType());
  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
    return false;

  // Start to split store.
  IRBuilder<> Builder(SI.getContext());
  Builder.SetInsertPoint(&SI);

  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
  if (LBC && LBC->getParent() != SI.getParent())
    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
  if (HBC && HBC->getParent() != SI.getParent())
    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());

  bool IsLE = SI.getDataLayout().isLittleEndian();
  auto CreateSplitStore = [&](Value *V, bool Upper) {
    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
    Value *Addr = SI.getPointerOperand();
    Align Alignment = SI.getAlign();
    const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
    if (IsOffsetStore) {
      Addr = Builder.CreateGEP(
          SplitStoreType, Addr,
          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));

      // When splitting the store in half, naturally one half will retain the
      // alignment of the original wider store, regardless of whether it was
      // over-aligned or not, while the other will require adjustment.
      Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
    }
    Builder.CreateAlignedStore(V, Addr, Alignment);
  };

  CreateSplitStore(LValue, false);
  CreateSplitStore(HValue, true);

  // Delete the old store.
  SI.eraseFromParent();
  return true;
}
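
// Illustrative IR sketch (not part of the original source): on a
// little-endian target whose TLI.isMultiStoresCheaperThanBitsMerge() returns
// true (or when ForceSplitStore is set), a merged store such as
//
//   %zl = zext i32 %lo to i64
//   %zh = zext i32 %hi to i64
//   %sh = shl i64 %zh, 32
//   %m  = or i64 %zl, %sh
//   store i64 %m, ptr %p
//
// is rewritten by the lambda above into two half-width stores, the upper half
// at element offset 1 of the split type:
//
//   store i32 %lo, ptr %p
//   %q = getelementptr i32, ptr %p, i32 1
//   store i32 %hi, ptr %q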
// Return true if the GEP has two operands, the first operand is of a sequential
// type, and the second operand is a constant.
static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
  gep_type_iterator I = gep_type_begin(*GEP);
  return GEP->getNumOperands() == 2 && I.isSequential() &&
         isa<ConstantInt>(GEP->getOperand(1));
}
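
// Illustrative note (not part of the original source): a GEP such as
//   %g = getelementptr i32, ptr %p, i64 4
// satisfies this predicate (one pointer operand plus a single constant index
// over a sequential type), whereas struct-field GEPs and GEPs with multiple
// indices do not.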
// Try unmerging GEPs to reduce liveness interference (register pressure) across
// IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
// reducing liveness interference across those edges benefits global register
// allocation. Currently handles only certain cases.
//
// For example, unmerge %GEPI and %UGEPI as below.
//
// ---------- BEFORE ----------
// SrcBlock:
//   ...
//   %GEPIOp = ...
//   ...
//   %GEPI = gep %GEPIOp, Idx
//   ...
//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it is used by
//      %UGEPI)
//
// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPIOp, UIdx
// ...
// ---------------------------
//
// ---------- AFTER ----------
// SrcBlock:
//   ... (same as above)
//   (* %GEPI is still alive on the indirectbr edges)
//   (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
//      following change)
// ...
//
// DstBi:
//   ...
//   %UGEPI = gep %GEPI, (UIdx-Idx)
// ...
// ---------------------------
//
// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
// no longer alive on them.
//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
// merging.
//
// Note this unmerging may increase the length of the data flow critical path
// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
// between the register pressure and the length of the data-flow critical
// path. Restricting this to the uncommon IndirectBr case would minimize the
// impact of a potentially longer critical path, if any, and the impact on
// compile time.
static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
                                             const TargetTransformInfo *TTI) {
  BasicBlock *SrcBlock = GEPI->getParent();
  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
  // (non-IndirectBr) cases exit early here.
  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
    return false;
  // Check that GEPI is a simple gep with a single constant index.
  if (!GEPSequentialConstIndexed(GEPI))
    return false;
  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
  // Check that GEPI is a cheap one.
  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
                         TargetTransformInfo::TCK_SizeAndLatency) >
      TargetTransformInfo::TCC_Basic)
    return false;
  Value *GEPIOp = GEPI->getOperand(0);
  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
  if (!isa<Instruction>(GEPIOp))
    return false;
  auto *GEPIOpI = cast<Instruction>(GEPIOp);
  if (GEPIOpI->getParent() != SrcBlock)
    return false;
  // Check that GEP is used outside the block, meaning it's alive on the
  // IndirectBr edge(s).
  if (llvm::none_of(GEPI->users(), [&](User *Usr) {
        if (auto *I = dyn_cast<Instruction>(Usr)) {
          if (I->getParent() != SrcBlock)
            return true;
        }
        return false;
      }))
    return false;
  // The second elements of the GEP chains to be unmerged.
  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
  // on IndirectBr edges.
  for (User *Usr : GEPIOp->users()) {
    if (Usr == GEPI)
      continue;
    // Check if Usr is an Instruction. If not, give up.
    if (!isa<Instruction>(Usr))
      return false;
    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that is fine; skip it.
    if (UI->getParent() == SrcBlock)
      continue;
    // Check if Usr is a GEP. If not, give up.
    if (!isa<GetElementPtrInst>(Usr))
      return false;
    auto *UGEPI = cast<GetElementPtrInst>(Usr);
    // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
    // the pointer operand to it. If so, record it in the vector. If not, give
    // up.
    if (!GEPSequentialConstIndexed(UGEPI))
      return false;
    if (UGEPI->getOperand(0) != GEPIOp)
      return false;
    if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
      return false;
    if (GEPIIdx->getType() !=
        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
      return false;
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
                           TargetTransformInfo::TCK_SizeAndLatency) >
        TargetTransformInfo::TCC_Basic)
      return false;
    UGEPIs.push_back(UGEPI);
  }
  if (UGEPIs.size() == 0)
    return false;
  // Check the materializing cost of (UIdx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    InstructionCost ImmCost = TTI->getIntImmCost(
        NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx = ConstantInt::get(
        GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(llvm::none_of(GEPIOp->users(),
                       [&](User *Usr) {
                         return cast<Instruction>(Usr)->getParent() != SrcBlock;
                       }) &&
         "GEPIOp is used outside SrcBlock");
  return true;
}
static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
                           SmallSet<BasicBlock *, 32> &FreshBBs,
                           bool IsHugeFunc) {
  // Try to convert:
  //   %c = icmp ult %x, 8
  //   br %c, bla, blb
  //   %tc = lshr %x, 3
  // to:
  //   %tc = lshr %x, 3
  //   %c = icmp eq %tc, 0
  //   br %c, bla, blb
  // Creating the cmp to zero can be better for the backend, especially if the
  // lshr produces flags that can be used automatically.
  if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
    return false;

  ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
  if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
    return false;

  Value *X = Cmp->getOperand(0);
  APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();

  for (auto *U : X->users()) {
    Instruction *UI = dyn_cast<Instruction>(U);
    // A quick dominance check.
    if (!UI ||
        (UI->getParent() != Branch->getParent() &&
         UI->getParent() != Branch->getSuccessor(0) &&
         UI->getParent() != Branch->getSuccessor(1)) ||
        (UI->getParent() != Branch->getParent() &&
         !UI->getParent()->getSinglePredecessor()))
      continue;

    if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
        match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
    if (Cmp->isEquality() &&
        (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
         match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
  }
  return false;
}
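
// Illustrative example (not part of the original source): for the equality
// case above, given
//   %c = icmp eq i32 %x, 16
//   %s = sub i32 %x, 16
// the compare is rewritten to
//   %c = icmp eq i32 %s, 0
// so the backend can reuse the flags already produced by the subtraction.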
bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
  bool AnyChange = false;
  AnyChange = fixupDbgVariableRecordsOnInst(*I);

  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return AnyChange;

  // TODO: Move into the switch on opcode below here.
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up. If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
      LargeOffsetGEPMap.erase(P);
      replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
      P->eraseFromParent();
      return true;
    }
    return AnyChange;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded. The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop). If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return AnyChange;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
         isa<TruncInst>(I)) &&
        TLI->optimizeExtendOrTruncateConversion(
            I, LI->getLoopFor(I->getParent()), *TTI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      /// Sink a zext or sext into its user blocks if the target type doesn't
      /// fit in one register.
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        if (TLI->optimizeExtendOrTruncateConversion(
                I, LI->getLoopFor(I->getParent()), *TTI))
          return true;

        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return AnyChange;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
      sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
    return true;

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      /// The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI->getIterator());
      NC->setDebugLoc(GEPI->getDebugLoc());
      replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
      RecursivelyDeleteTriviallyDeadInstructions(
          GEPI, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ? F : nullptr;

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
        FI->eraseFromParent();
        return true;
      }
    }
    return AnyChange;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  case Instruction::Br:
    return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
  }

  return AnyChange;
}
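
// Illustrative example for the freeze transform in optimizeInst above (not
// part of the original source):
//   %c = icmp eq i32 %a, 0
//   %f = freeze i1 %c
// becomes
//   %a.fr = freeze i32 %a
//   %c    = icmp eq i32 %a.fr, 0
// so the compare feeds the branch directly and can be folded into a
// conditional jump during instruction selection.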
/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}
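
// Illustrative idiom (not part of the original source): for an i8 value, a
// shift-and-mask reversal built from steps like
//   %hi = shl i8 %x, 4
//   %lo = lshr i8 %x, 4
//   %r1 = or i8 %hi, %lo        ; swap nibbles, then 2-bit pairs, then bits
//   ...
// can be recognized by recognizeBSwapOrBitReverseIdiom and collapsed into a
// single llvm.bitreverse.i8 call when ISD::BITREVERSE is legal or custom for
// the target.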
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  do {
    CurInstIterator = BB.begin();
    ModifiedDT = ModifyDT::NotModifyDT;
    while (CurInstIterator != BB.end()) {
      MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
      if (ModifiedDT != ModifyDT::NotModifyDT) {
        // For a huge function we tend to quickly go through the inner
        // optimization opportunities in the BB. So we go back to the BB head
        // to re-optimize each instruction instead of going back to the
        // function head.
        if (IsHugeFunc) {
          DT.reset();
          getDT(*BB.getParent());
          break;
        } else {
          return true;
        }
      }
    }
  } while (ModifiedDT == ModifyDT::ModifyInstDT);

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);

  return MadeChange;
}
// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
                                     DVI.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the
      // best opportunity to be accurately lowered. This update may change
      // the type of pointer being referred to; however this makes no
      // difference to debugging information, and we can't generate bitcasts
      // that may affect codegen.
      DVI.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}
bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
  bool AnyChange = false;
  for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
    AnyChange |= fixupDbgVariableRecord(DVR);
  return AnyChange;
}
// FIXME: should updating debug-info really cause the "changed" flag to fire,
// which can cause a function to be reprocessed?
bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
  if (DVR.Type != DbgVariableRecord::LocationType::Value &&
      DVR.Type != DbgVariableRecord::LocationType::Assign)
    return false;

  // Does this DbgVariableRecord refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
                                     DVR.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the
      // best opportunity to be accurately lowered. This update may change
      // the type of pointer being referred to; however this makes no
      // difference to debugging information, and we can't generate bitcasts
      // that may affect codegen.
      DVR.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}
static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
  DVI->removeFromParent();
  if (isa<PHINode>(VI))
    DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
  else
    DVI->insertAfter(VI);
}

static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) {
  DVR->removeFromParent();
  BasicBlock *VIBB = VI->getParent();
  if (isa<PHINode>(VI))
    VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
  else
    VIBB->insertDbgRecordAfter(DVR, VI);
}
// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
    SmallVector<Instruction *, 4> VIs;
    for (Value *V : DbgItem->location_ops())
      if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
        VIs.push_back(VI);

    // This item may depend on multiple instructions, complicating any
    // potential sink. This block takes the defensive approach, opting to
    // "undef" the item if it has more than one instruction and any of them do
    // not dominate it.
    for (Instruction *VI : VIs) {
      if (VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, Position))
        continue;

      // If we depend on multiple instructions and any of them doesn't
      // dominate this DVI, we probably can't salvage it: moving it to
      // after any of the instructions could cause us to lose the others.
      if (VIs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Unable to find valid location for Debug Value, undefing:\n"
            << *DbgItem);
        DbgItem->setKillLocation();
        break;
      }

      LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
                        << *DbgItem << ' ' << *VI);
      DbgInserterHelper(DbgItem, VI);
      MadeChange = true;
      break;
    }
  };

  for (BasicBlock &BB : F) {
    for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
      // Process dbg.value intrinsics.
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
      if (DVI) {
        DbgProcessor(DVI, DVI);
        continue;
      }

      // If this isn't a dbg.value, process any DbgVariableRecord records
      // attached to this instruction.
      for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
               filterDbgVars(Insn.getDbgRecordRange()))) {
        if (DVR.Type != DbgVariableRecord::LocationType::Value)
          continue;
        DbgProcessor(&DVR, &Insn);
      }
    }
  }

  return MadeChange;
}
// Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
// probes can be chained dependencies of other regular DAG nodes and block DAG
// combine optimizations.
bool CodeGenPrepare::placePseudoProbes(Function &F) {
  bool MadeChange = false;
  for (auto &Block : F) {
    // Move the remaining probes to the beginning of the block.
    auto FirstInst = Block.getFirstInsertionPt();
    while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
      ++FirstInst;
    BasicBlock::iterator I(FirstInst);
    I++;
    while (I != Block.end()) {
      if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
        II->moveBefore(&*FirstInst);
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}
/// Scale down both weights to fit into uint32_t.
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}
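
// Worked example (not part of the original source): for NewTrue = 2^33 and
// NewFalse = 2^31, NewMax = 2^33, so Scale = (2^33 / (2^32 - 1)) + 1 = 3.
// The weights become 2^33 / 3 and 2^31 / 3, which both fit in uint32_t while
// roughly preserving the original 4:1 ratio.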
/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, cond2
    //   br i1 %cond.or label %dest1, label %dest2"
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BB can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());
    if (IsHugeFunc)
      FreshBBs.insert(TmpBB);

    // Update original basic block by using the first condition directly by the
    // branch instruction and removing the no longer needed and/or instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (NewBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //   = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        // Attach the scaled weights; the unscaled 64-bit values may not fit
        // into uint32_t branch weights.
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight, NewFalseWeight,
                                                  hasBranchWeightOrigin(*Br1)));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.
      //
      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //   = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    ModifiedDT = ModifyDT::ModifyBBDT;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();