//===- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR lowering for the llvm.memcpy, llvm.memmove,
// llvm.memset, llvm.load.relative and llvm.objc.* intrinsics.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Transforms/Utils/LowerVectorIntrinsics.h"

using namespace llvm;
/// Threshold for expanding statically sized memory intrinsic calls in IR.
/// Calls of known size larger than this will be expanded by the pass. Calls
/// of unknown or smaller size will be left for expansion in codegen.
static cl::opt<int64_t> MemIntrinsicExpandSizeThresholdOpt(
    "mem-intrinsic-expand-size",
    cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1),
    cl::Hidden);

struct PreISelIntrinsicLowering {
  const TargetMachine *TM;
  const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
  const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;

  /// If this is true, assume it is preferable to leave memory intrinsic calls
  /// for replacement with a library call later. Otherwise this depends on
  /// TargetLoweringInfo availability of the corresponding function.
  const bool UseMemIntrinsicLibFunc;

  explicit PreISelIntrinsicLowering(
      const TargetMachine *TM_,
      function_ref<TargetTransformInfo &(Function &)> LookupTTI_,
      function_ref<TargetLibraryInfo &(Function &)> LookupTLI_,
      bool UseMemIntrinsicLibFunc_ = true)
      : TM(TM_), LookupTTI(LookupTTI_), LookupTLI(LookupTLI_),
        UseMemIntrinsicLibFunc(UseMemIntrinsicLibFunc_) {}

  static bool shouldExpandMemIntrinsicWithSize(Value *Size,
                                               const TargetTransformInfo &TTI);
  bool expandMemIntrinsicUses(Function &F) const;
  bool lowerIntrinsics(Module &M) const;
};
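
// Invoke Callback on every CallInst that uses Intrin. Callback should return
// true when it removed (erased or replaced) the call; forEachCall returns
// true if any callback did so.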
template <class T> static bool forEachCall(Function &Intrin, T Callback) {
  // Lowering all intrinsics in a function will delete multiple uses, so we
  // can't use an early-inc-range. In case some remain, we don't want to look
  // at them again. Unfortunately, Value::UseList is private, so we can't use a
  // simple Use**. If LastUse is null, the next use to consider is
  // Intrin.use_begin(), otherwise it's LastUse->getNext().
  Use *LastUse = nullptr;
  bool Changed = false;
  while (!Intrin.use_empty() && (!LastUse || LastUse->getNext())) {
    Use *U = LastUse ? LastUse->getNext() : &*Intrin.use_begin();
    bool Removed = false;
    // An intrinsic cannot have its address taken, so it cannot be an argument
    // operand. It might be used as operand in debug metadata, though.
    if (auto CI = dyn_cast<CallInst>(U->getUser()))
      Changed |= Removed = Callback(CI);
    if (!Removed)
      LastUse = U;
  }
  return Changed;
}
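
// Lower llvm.load.relative.* calls: load a 32-bit offset from Ptr + Offset
// and add it back to Ptr to form the result pointer.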
static bool lowerLoadRelative(Function &F) {
  bool Changed = false;
  Type *Int32Ty = Type::getInt32Ty(F.getContext());

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto CI = dyn_cast<CallInst>(U.getUser());
    if (!CI || CI->getCalledOperand() != &F)
      continue;

    IRBuilder<> B(CI);
    Value *OffsetPtr =
        B.CreatePtrAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtr, Align(4));

    Value *ResultPtr = B.CreatePtrAdd(CI->getArgOperand(0), OffsetI32);

    CI->replaceAllUsesWith(ResultPtr);
    CI->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

// ObjCARC has knowledge about whether an obj-c runtime function needs to be
// always tail-called or never tail-called.
static CallInst::TailCallKind getOverridingTailCallKind(const Function &F) {
  objcarc::ARCInstKind Kind = objcarc::GetFunctionClass(&F);
  if (objcarc::IsAlwaysTail(Kind))
    return CallInst::TCK_Tail;
  else if (objcarc::IsNeverTail(Kind))
    return CallInst::TCK_NoTail;
  return CallInst::TCK_None;
}
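
// Lower calls to an llvm.objc.* intrinsic into calls to the ObjC runtime
// function NewFn, carrying over arguments, operand bundles, tail-call kind
// and the 'returned' attribute.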
static bool lowerObjCCall(Function &F, const char *NewFn,
                          bool setNonLazyBind = false) {
  assert(IntrinsicInst::mayLowerToFunctionCall(F.getIntrinsicID()) &&
         "Pre-ISel intrinsics do lower into regular function calls");
  if (F.use_empty())
    return false;

  // If we haven't already looked up this function, check to see if the
  // program already contains a function with this name.
  Module *M = F.getParent();
  FunctionCallee FCache = M->getOrInsertFunction(NewFn, F.getFunctionType());

  if (Function *Fn = dyn_cast<Function>(FCache.getCallee())) {
    Fn->setLinkage(F.getLinkage());
    if (setNonLazyBind && !Fn->isWeakForLinker()) {
      // If we have Native ARC, set nonlazybind attribute for these APIs for
      // performance.
      Fn->addFnAttr(Attribute::NonLazyBind);
    }
  }

  CallInst::TailCallKind OverridingTCK = getOverridingTailCallKind(F);

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto *CB = cast<CallBase>(U.getUser());

    if (CB->getCalledFunction() != &F) {
      objcarc::ARCInstKind Kind = objcarc::getAttachedARCFunctionKind(CB);
      assert((Kind == objcarc::ARCInstKind::RetainRV ||
              Kind == objcarc::ARCInstKind::UnsafeClaimRV) &&
             "use expected to be the argument of operand bundle "
             "\"clang.arc.attachedcall\"");
      U.set(FCache.getCallee());
      continue;
    }

    auto *CI = cast<CallInst>(CB);
    assert(CI->getCalledFunction() && "Cannot lower an indirect call!");

    IRBuilder<> Builder(CI->getParent(), CI->getIterator());
    SmallVector<Value *, 8> Args(CI->args());
    SmallVector<llvm::OperandBundleDef, 1> BundleList;
    CI->getOperandBundlesAsDefs(BundleList);
    CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
    NewCI->setName(CI->getName());

    // Try to set the most appropriate TailCallKind based on both the current
    // attributes and the ones that we could get from ObjCARC's special
    // knowledge of the runtime functions.
    //
    // std::max respects both requirements of notail and tail here:
    // * notail on either the call or from ObjCARC becomes notail
    // * tail on either side is stronger than none, but not notail
    CallInst::TailCallKind TCK = CI->getTailCallKind();
    NewCI->setTailCallKind(std::max(TCK, OverridingTCK));

    // Transfer the 'returned' attribute from the intrinsic to the call site.
    // By applying this only to intrinsic call sites, we avoid applying it to
    // non-ARC explicit calls to things like objc_retain which have not been
    // auto-upgraded to use the intrinsics.
    unsigned Index;
    if (F.getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      NewCI->addParamAttr(Index - AttributeList::FirstArgIndex,
                          Attribute::Returned);

    if (!CI->use_empty())
      CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  }

  return true;
}
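
// Decide whether a mem intrinsic with the given size operand should be
// expanded by this pass; non-constant sizes and sizes at or below the
// threshold are left for codegen.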
// TODO: Should refine based on estimated number of accesses (e.g. does it
// require splitting based on alignment)
bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
    Value *Size, const TargetTransformInfo &TTI) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Size);
  if (!CI)
    return false;

  uint64_t Threshold = MemIntrinsicExpandSizeThresholdOpt.getNumOccurrences()
                           ? MemIntrinsicExpandSizeThresholdOpt
                           : TTI.getMaxMemIntrinsicInlineSizeThreshold();
  uint64_t SizeVal = CI->getZExtValue();

  // Treat a threshold of 0 as a special case to force expansion of all
  // intrinsics, including size 0.
  return SizeVal > Threshold || Threshold == 0;
}
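
// Returns true if the target provides a library implementation of the given
// runtime libcall, so the intrinsic can be left for a libcall instead of
// being expanded into a loop.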
static bool canEmitLibcall(const TargetMachine *TM, Function *F,
                           RTLIB::Libcall LC) {
  // TODO: Should this consider the address space of the memcpy?
  if (!TM)
    return false;

  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getLibcallName(LC) != nullptr;
}

// Return a value appropriate for use with the memset_pattern16 libcall, if
// possible and if we know how. (Adapted from equivalent helper in
// LoopIdiomRecognize).
static Constant *getMemSetPattern16Value(MemSetPatternInst *Inst,
                                         const TargetLibraryInfo &TLI) {
  // TODO: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // Don't emit libcalls if a non-default address space is being used.
  if (Inst->getRawDest()->getType()->getPointerAddressSpace() != 0)
    return nullptr;

  Value *V = Inst->getValue();
  Type *VTy = V->getType();
  const DataLayout &DL = Inst->getDataLayout();
  Module *M = Inst->getModule();

  if (!isLibFuncEmittable(M, &TLI, LibFunc_memset_pattern16))
    return nullptr;

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C || isa<ConstantExpr>(C))
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL.getTypeSizeInBits(VTy);
  if (!DL.typeSizeEqualsStoreSize(VTy) || !isPowerOf2_64(Size))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL.isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  uint64_t ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
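
// Expand all uses of the memory intrinsic F, either by emitting a library
// call (when allowed and available) or by expanding the intrinsic into an
// inline loop.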
// TODO: Handle atomic memcpy and memcpy.inline
// TODO: Pass ScalarEvolution
bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
  Intrinsic::ID ID = F.getIntrinsicID();
  bool Changed = false;

  for (User *U : llvm::make_early_inc_range(F.users())) {
    Instruction *Inst = cast<Instruction>(U);

    switch (ID) {
    case Intrinsic::memcpy: {
      auto *Memcpy = cast<MemCpyInst>(Inst);
      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memcpy->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMCPY))
          break;

        // TODO: For optsize, emit the loop into a separate function
        expandMemCpyAsLoop(Memcpy, TTI);
        Changed = true;
        Memcpy->eraseFromParent();
      }
      break;
    }
    case Intrinsic::memcpy_inline: {
      // Only expand llvm.memcpy.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memcpy intrinsics undisturbed.
      auto *Memcpy = cast<MemCpyInlineInst>(Inst);
      if (isa<ConstantInt>(Memcpy->getLength()))
        break;

      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      expandMemCpyAsLoop(Memcpy, TTI);
      Changed = true;
      Memcpy->eraseFromParent();
      break;
    }
    case Intrinsic::memmove: {
      auto *Memmove = cast<MemMoveInst>(Inst);
      Function *ParentFunc = Memmove->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memmove->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMMOVE))
          break;

        if (expandMemMoveAsLoop(Memmove, TTI)) {
          Changed = true;
          Memmove->eraseFromParent();
        }
      }
      break;
    }
    case Intrinsic::memset: {
      auto *Memset = cast<MemSetInst>(Inst);
      Function *ParentFunc = Memset->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memset->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMSET))
          break;

        expandMemSetAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
      }
      break;
    }
    case Intrinsic::memset_inline: {
      // Only expand llvm.memset.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memset intrinsics undisturbed.
      auto *Memset = cast<MemSetInlineInst>(Inst);
      if (isa<ConstantInt>(Memset->getLength()))
        break;

      expandMemSetAsLoop(Memset);
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    case Intrinsic::experimental_memset_pattern: {
      auto *Memset = cast<MemSetPatternInst>(Inst);
      const TargetLibraryInfo &TLI = LookupTLI(*Memset->getFunction());
      Constant *PatternValue = getMemSetPattern16Value(Memset, TLI);

      // If it isn't possible to emit a memset_pattern16 libcall, expand to a
      // loop instead.
      if (!PatternValue) {
        expandMemSetPatternAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
        break;
      }

      // FIXME: There is currently no profitability calculation for emitting
      // the libcall vs expanding the memset.pattern directly.
      IRBuilder<> Builder(Inst);
      Module *M = Memset->getModule();
      const DataLayout &DL = Memset->getDataLayout();

      StringRef FuncName = "memset_pattern16";
      FunctionCallee MSP = getOrInsertLibFunc(
          M, TLI, LibFunc_memset_pattern16, Builder.getVoidTy(),
          Memset->getRawDest()->getType(), Builder.getPtrTy(),
          Memset->getLength()->getType());
      inferNonMandatoryLibFuncAttrs(M, FuncName, TLI);

      // Otherwise we should form a memset_pattern16. PatternValue is known
      // to be a constant array of 16 bytes. Put the value into a mergeable
      // global.
      assert(Memset->getRawDest()->getType()->getPointerAddressSpace() == 0 &&
             "Should have skipped if non-zero AS");
      GlobalVariable *GV = new GlobalVariable(
          *M, PatternValue->getType(), /*isConstant=*/true,
          GlobalValue::PrivateLinkage, PatternValue, ".memset_pattern");
      GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
      // TODO: Consider relaxing alignment requirement.
      GV->setAlignment(Align(16));
      Value *PatternPtr = GV;
      Value *NumBytes = Builder.CreateMul(
          Builder.getInt64(
              DL.getTypeSizeInBits(Memset->getValue()->getType()) / 8),
          Memset->getLength());
      CallInst *MemsetPattern16Call =
          Builder.CreateCall(MSP, {Memset->getRawDest(), PatternPtr, NumBytes});
      MemsetPattern16Call->setAAMetadata(Memset->getAAMetadata());
      // Preserve any call site attributes on the destination pointer
      // argument (e.g. alignment).
      AttrBuilder ArgAttrs(Memset->getContext(),
                           Memset->getAttributes().getParamAttrs(0));
      MemsetPattern16Call->setAttributes(
          MemsetPattern16Call->getAttributes().addParamAttributes(
              Memset->getContext(), 0, ArgAttrs));
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    default:
      llvm_unreachable("unhandled intrinsic");
    }
  }

  return Changed;
}
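
// Dispatch over all functions in the module and lower the intrinsics this
// pass handles: memory intrinsics, llvm.load.relative, constant intrinsics,
// VP intrinsics, unary vector intrinsics such as exp2 on scalable vectors,
// and the llvm.objc.* family.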
bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
  bool Changed = false;
  for (Function &F : M) {
    switch (F.getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::experimental_memset_pattern:
      Changed |= expandMemIntrinsicUses(F);
      break;
    case Intrinsic::load_relative:
      Changed |= lowerLoadRelative(F);
      break;
    case Intrinsic::is_constant:
    case Intrinsic::objectsize:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        TargetLibraryInfo &TLI = LookupTLI(*Parent);
        // Intrinsics in unreachable code are not lowered.
        bool Changed = lowerConstantIntrinsics(*Parent, TLI, /*DT=*/nullptr);
        return Changed;
      });
      break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:
#include "llvm/IR/VPIntrinsics.def"
      forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        const TargetTransformInfo &TTI = LookupTTI(*Parent);
        auto *VPI = cast<VPIntrinsic>(CI);
        VPExpansionDetails ED = expandVectorPredicationIntrinsic(*VPI, TTI);
        // Expansion of VP intrinsics may change the IR but not actually
        // replace the intrinsic, so update Changed for the pass
        // and compute Removed for forEachCall.
        Changed |= ED != VPExpansionDetails::IntrinsicUnchanged;
        bool Removed = ED == VPExpansionDetails::IntrinsicReplaced;
        return Removed;
      });
      break;
    case Intrinsic::objc_autorelease:
      Changed |= lowerObjCCall(F, "objc_autorelease");
      break;
    case Intrinsic::objc_autoreleasePoolPop:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPop");
      break;
    case Intrinsic::objc_autoreleasePoolPush:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPush");
      break;
    case Intrinsic::objc_autoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_autoreleaseReturnValue");
      break;
    case Intrinsic::objc_copyWeak:
      Changed |= lowerObjCCall(F, "objc_copyWeak");
      break;
    case Intrinsic::objc_destroyWeak:
      Changed |= lowerObjCCall(F, "objc_destroyWeak");
      break;
    case Intrinsic::objc_initWeak:
      Changed |= lowerObjCCall(F, "objc_initWeak");
      break;
    case Intrinsic::objc_loadWeak:
      Changed |= lowerObjCCall(F, "objc_loadWeak");
      break;
    case Intrinsic::objc_loadWeakRetained:
      Changed |= lowerObjCCall(F, "objc_loadWeakRetained");
      break;
    case Intrinsic::objc_moveWeak:
      Changed |= lowerObjCCall(F, "objc_moveWeak");
      break;
    case Intrinsic::objc_release:
      Changed |= lowerObjCCall(F, "objc_release", true);
      break;
    case Intrinsic::objc_retain:
      Changed |= lowerObjCCall(F, "objc_retain", true);
      break;
    case Intrinsic::objc_retainAutorelease:
      Changed |= lowerObjCCall(F, "objc_retainAutorelease");
      break;
    case Intrinsic::objc_retainAutoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleaseReturnValue");
      break;
    case Intrinsic::objc_retainAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainBlock:
      Changed |= lowerObjCCall(F, "objc_retainBlock");
      break;
    case Intrinsic::objc_storeStrong:
      Changed |= lowerObjCCall(F, "objc_storeStrong");
      break;
    case Intrinsic::objc_storeWeak:
      Changed |= lowerObjCCall(F, "objc_storeWeak");
      break;
    case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_unsafeClaimAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainedObject:
      Changed |= lowerObjCCall(F, "objc_retainedObject");
      break;
    case Intrinsic::objc_unretainedObject:
      Changed |= lowerObjCCall(F, "objc_unretainedObject");
      break;
    case Intrinsic::objc_unretainedPointer:
      Changed |= lowerObjCCall(F, "objc_unretainedPointer");
      break;
    case Intrinsic::objc_retain_autorelease:
      Changed |= lowerObjCCall(F, "objc_retain_autorelease");
      break;
    case Intrinsic::objc_sync_enter:
      Changed |= lowerObjCCall(F, "objc_sync_enter");
      break;
    case Intrinsic::objc_sync_exit:
      Changed |= lowerObjCCall(F, "objc_sync_exit");
      break;
    case Intrinsic::exp2:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Type *Ty = CI->getArgOperand(0)->getType();
        if (!isa<ScalableVectorType>(Ty))
          return false;
        const TargetLowering *TL =
            TM->getSubtargetImpl(F)->getTargetLowering();
        unsigned Op = TL->IntrinsicIDToISD(F.getIntrinsicID());
        if (!TL->isOperationExpand(Op, EVT::getEVT(Ty)))
          return false;
        return lowerUnaryVectorIntrinsicAsLoop(M, CI);
      });
      break;
    }
  }
  return Changed;
}
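
// Legacy pass manager wrapper around PreISelIntrinsicLowering.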
namespace {

class PreISelIntrinsicLoweringLegacyPass : public ModulePass {
public:
  static char ID;

  PreISelIntrinsicLoweringLegacyPass() : ModulePass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    auto LookupTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };
    auto LookupTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };

    const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
    return Lowering.lowerIntrinsics(M);
  }
};

} // end anonymous namespace

char PreISelIntrinsicLoweringLegacyPass::ID;

INITIALIZE_PASS_BEGIN(PreISelIntrinsicLoweringLegacyPass,
                      "pre-isel-intrinsic-lowering",
                      "Pre-ISel Intrinsic Lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(PreISelIntrinsicLoweringLegacyPass,
                    "pre-isel-intrinsic-lowering",
                    "Pre-ISel Intrinsic Lowering", false, false)

ModulePass *llvm::createPreISelIntrinsicLoweringPass() {
  return new PreISelIntrinsicLoweringLegacyPass();
}
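
// New pass manager entry point: look up per-function TTI/TLI through the
// FunctionAnalysisManager and run the shared lowering.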
PreservedAnalyses PreISelIntrinsicLoweringPass::run(Module &M,
                                                    ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto LookupTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto LookupTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
  if (!Lowering.lowerIntrinsics(M))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}