1 //===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This pass implements IR expansion for vector predication intrinsics, allowing
10 // targets to enable vector predication until just before codegen.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/ExpandVectorPredication.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/TargetTransformInfo.h"
17 #include "llvm/Analysis/ValueTracking.h"
18 #include "llvm/CodeGen/Passes.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/IRBuilder.h"
22 #include "llvm/IR/InstIterator.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/Module.h"
27 #include "llvm/InitializePasses.h"
28 #include "llvm/Pass.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Compiler.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
// Shorthands for the TTI-reported legalization strategy types.
using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

// First expansion: stringify each strategy name for the cl::opt descriptions.
#define VPINTERNAL_CASE(X) "|" #X

// Testing knob: force one %evl legalization strategy regardless of TTI.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

// Testing knob: force one %mask legalization strategy regardless of TTI.
static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, Ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
// Second expansion: map each strategy name onto a StringSwitch case.
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

/// Parse a legalization-override string into a VPTransform value.
/// \p TextOpt must name one of the VPINTERNAL_VPLEGAL_CASES strategies
/// (the StringSwitch result conversion asserts otherwise).
static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES
73 // Whether any override options are set.
74 static bool anyExpandVPOverridesSet() {
75 return !EVLTransformOverride
.empty() || !MaskTransformOverride
.empty();
#define DEBUG_TYPE "expandvp"

// Pass statistics, reported with -stats.
STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of folded vector predication operations");
85 /// \returns Whether the vector mask \p MaskVal has all lane bits set.
86 static bool isAllTrueMask(Value
*MaskVal
) {
87 auto *ConstVec
= dyn_cast
<ConstantVector
>(MaskVal
);
88 return ConstVec
&& ConstVec
->isAllOnesValue();
91 /// \returns A non-excepting divisor constant for this type.
92 static Constant
*getSafeDivisor(Type
*DivTy
) {
93 assert(DivTy
->isIntOrIntVectorTy() && "Unsupported divisor type");
94 return ConstantInt::get(DivTy
, 1u, false);
97 /// Transfer operation properties from \p OldVPI to \p NewVal.
98 static void transferDecorations(Value
&NewVal
, VPIntrinsic
&VPI
) {
99 auto *NewInst
= dyn_cast
<Instruction
>(&NewVal
);
100 if (!NewInst
|| !isa
<FPMathOperator
>(NewVal
))
103 auto *OldFMOp
= dyn_cast
<FPMathOperator
>(&VPI
);
107 NewInst
->setFastMathFlags(OldFMOp
->getFastMathFlags());
/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// OldVP gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  // Copy decorations (e.g. fast-math flags) before the old op disappears.
  transferDecorations(NewOp, OldOp);
  // Re-route every user of the VP intrinsic to the replacement value, then
  // delete the now-dead intrinsic call.
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}
// Expansion pass state at function scope.
struct CachingVPExpander {
  // Function being transformed.
  Function &F;
  // Target hooks that decide the legalization strategy per intrinsic.
  const TargetTransformInfo &TTI;

  /// \returns A (fixed length) vector with ascending integer indices
  /// (<0, 1, ..., NumElems-1>).
  /// \p Builder
  ///    Used for instruction creation.
  /// \p LaneTy
  ///    Integer element type of the result vector.
  /// \p NumElems
  ///    Number of vector elements.
  Value *createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                          unsigned NumElems);

  /// \returns A bitmask that is true where the lane position is less-than \p
  /// EVLParam
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  // Fold the %evl predication of \p VPI into its %mask operand.
  Value *foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation.
  void discardEVLParameter(VPIntrinsic &PI);

  /// \brief Lower this VP binary operator to a unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// \brief Lower this VP reduction to a call to an unpredicated reduction
  /// intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// \brief Query TTI and expand the vector predication in \p P accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// \brief Determine how and whether the VPIntrinsic \p VPI shall be
  /// expanded. This overrides TTI with the cl::opts listed at the top of this
  /// file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  // Cached result of anyExpandVPOverridesSet() (cl::opts are testing-only).
  bool UsingTTIOverrides;

public:
  CachingVPExpander(Function &F, const TargetTransformInfo &TTI)
      : F(F), TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  // Entry point; returns true if any IR was changed.
  bool expandVectorPredication();
};
182 //// CachingVPExpander {
184 Value
*CachingVPExpander::createStepVector(IRBuilder
<> &Builder
, Type
*LaneTy
,
187 SmallVector
<Constant
*, 16> ConstElems
;
189 for (unsigned Idx
= 0; Idx
< NumElems
; ++Idx
)
190 ConstElems
.push_back(ConstantInt::get(LaneTy
, Idx
, false));
192 return ConstantVector::get(ConstElems
);
195 Value
*CachingVPExpander::convertEVLToMask(IRBuilder
<> &Builder
,
197 ElementCount ElemCount
) {
199 // Scalable vector %evl conversion.
200 if (ElemCount
.isScalable()) {
201 auto *M
= Builder
.GetInsertBlock()->getModule();
202 Type
*BoolVecTy
= VectorType::get(Builder
.getInt1Ty(), ElemCount
);
203 Function
*ActiveMaskFunc
= Intrinsic::getDeclaration(
204 M
, Intrinsic::get_active_lane_mask
, {BoolVecTy
, EVLParam
->getType()});
205 // `get_active_lane_mask` performs an implicit less-than comparison.
206 Value
*ConstZero
= Builder
.getInt32(0);
207 return Builder
.CreateCall(ActiveMaskFunc
, {ConstZero
, EVLParam
});
210 // Fixed vector %evl conversion.
211 Type
*LaneTy
= EVLParam
->getType();
212 unsigned NumElems
= ElemCount
.getFixedValue();
213 Value
*VLSplat
= Builder
.CreateVectorSplat(NumElems
, EVLParam
);
214 Value
*IdxVec
= createStepVector(Builder
, LaneTy
, NumElems
);
215 return Builder
.CreateICmp(CmpInst::ICMP_ULT
, IdxVec
, VLSplat
);
219 CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder
<> &Builder
,
221 assert((isSafeToSpeculativelyExecute(&VPI
) ||
222 VPI
.canIgnoreVectorLengthParam()) &&
223 "Implicitly dropping %evl in non-speculatable operator!");
225 auto OC
= static_cast<Instruction::BinaryOps
>(*VPI
.getFunctionalOpcode());
226 assert(Instruction::isBinaryOp(OC
));
228 Value
*Op0
= VPI
.getOperand(0);
229 Value
*Op1
= VPI
.getOperand(1);
230 Value
*Mask
= VPI
.getMaskParam();
232 // Blend in safe operands.
233 if (Mask
&& !isAllTrueMask(Mask
)) {
236 // Can safely ignore the predicate.
239 // Division operators need a safe divisor on masked-off lanes (1).
240 case Instruction::UDiv
:
241 case Instruction::SDiv
:
242 case Instruction::URem
:
243 case Instruction::SRem
:
244 // 2nd operand must not be zero.
245 Value
*SafeDivisor
= getSafeDivisor(VPI
.getType());
246 Op1
= Builder
.CreateSelect(Mask
, Op1
, SafeDivisor
);
250 Value
*NewBinOp
= Builder
.CreateBinOp(OC
, Op0
, Op1
, VPI
.getName());
252 replaceOperation(*NewBinOp
, VPI
);
/// \returns The neutral element for the reduction performed by \p VPI, i.e.
/// the value that leaves the result unchanged when it occupies a masked-off
/// lane. \p EltTy is the scalar element type of the reduction.
static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  bool Negative = false;
  unsigned EltBits = EltTy->getScalarSizeInBits();
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Expecting a VP reduction intrinsic");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor:
  case Intrinsic::vp_reduce_umax:
    // Zero is neutral for add/or/xor and is the minimum for unsigned max.
    return Constant::getNullValue(EltTy);
  case Intrinsic::vp_reduce_mul:
    return ConstantInt::get(EltTy, 1, /*IsSigned*/ false);
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_umin:
    // All-ones is neutral for and and is the maximum for unsigned min.
    return ConstantInt::getAllOnesValue(EltTy);
  case Intrinsic::vp_reduce_smin:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMaxValue(EltBits));
  case Intrinsic::vp_reduce_smax:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMinValue(EltBits));
  case Intrinsic::vp_reduce_fmax:
    // fmax uses the fmin logic below with the sign flipped.
    Negative = true;
    LLVM_FALLTHROUGH;
  case Intrinsic::vp_reduce_fmin: {
    // Pick the weakest value the FP environment allows: a quiet NaN unless
    // NaNs are excluded, else an infinity unless infinities are excluded,
    // else the largest finite value.
    FastMathFlags Flags = VPI.getFastMathFlags();
    const fltSemantics &Semantics = EltTy->getFltSemantics();
    return !Flags.noNaNs() ? ConstantFP::getQNaN(EltTy, Negative)
           : !Flags.noInfs()
               ? ConstantFP::getInfinity(EltTy, Negative)
               : ConstantFP::get(EltTy,
                                 APFloat::getLargest(Semantics, Negative));
  }
  case Intrinsic::vp_reduce_fadd:
    // -0.0 (not +0.0) is the true neutral element of fadd.
    return ConstantFP::getNegativeZero(EltTy);
  case Intrinsic::vp_reduce_fmul:
    return ConstantFP::get(EltTy, 1.0);
  }
}
Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert((isSafeToSpeculativelyExecute(&VPI) ||
          VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert neutral element in masked-out positions
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  // Emit the unpredicated reduction intrinsic, then combine its result with
  // the start value using the matching scalar operation.
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
    Reduction = Builder.CreateAddReduce(RedOp);
    Reduction = Builder.CreateAdd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_mul:
    Reduction = Builder.CreateMulReduce(RedOp);
    Reduction = Builder.CreateMul(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_and:
    Reduction = Builder.CreateAndReduce(RedOp);
    Reduction = Builder.CreateAnd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_or:
    Reduction = Builder.CreateOrReduce(RedOp);
    Reduction = Builder.CreateOr(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_xor:
    Reduction = Builder.CreateXorReduce(RedOp);
    Reduction = Builder.CreateXor(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmax:
    Reduction = Builder.CreateFPMaxReduce(RedOp);
    // Preserve the VP intrinsic's fast-math flags on the new reduction.
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmin:
    Reduction = Builder.CreateFPMinReduce(RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::minnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fadd:
    // The FP add/mul reduction creators take the start value directly.
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}
386 void CachingVPExpander::discardEVLParameter(VPIntrinsic
&VPI
) {
387 LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI
<< "\n");
389 if (VPI
.canIgnoreVectorLengthParam())
392 Value
*EVLParam
= VPI
.getVectorLengthParam();
396 ElementCount StaticElemCount
= VPI
.getStaticVectorLength();
397 Value
*MaxEVL
= nullptr;
398 Type
*Int32Ty
= Type::getInt32Ty(VPI
.getContext());
399 if (StaticElemCount
.isScalable()) {
401 auto *M
= VPI
.getModule();
402 Function
*VScaleFunc
=
403 Intrinsic::getDeclaration(M
, Intrinsic::vscale
, Int32Ty
);
404 IRBuilder
<> Builder(VPI
.getParent(), VPI
.getIterator());
405 Value
*FactorConst
= Builder
.getInt32(StaticElemCount
.getKnownMinValue());
406 Value
*VScale
= Builder
.CreateCall(VScaleFunc
, {}, "vscale");
407 MaxEVL
= Builder
.CreateMul(VScale
, FactorConst
, "scalable_size",
408 /*NUW*/ true, /*NSW*/ false);
410 MaxEVL
= ConstantInt::get(Int32Ty
, StaticElemCount
.getFixedValue(), false);
412 VPI
.setVectorLengthParam(MaxEVL
);
415 Value
*CachingVPExpander::foldEVLIntoMask(VPIntrinsic
&VPI
) {
416 LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI
<< '\n');
418 IRBuilder
<> Builder(&VPI
);
420 // Ineffective %evl parameter and so nothing to do here.
421 if (VPI
.canIgnoreVectorLengthParam())
424 // Only VP intrinsics can have an %evl parameter.
425 Value
*OldMaskParam
= VPI
.getMaskParam();
426 Value
*OldEVLParam
= VPI
.getVectorLengthParam();
427 assert(OldMaskParam
&& "no mask param to fold the vl param into");
428 assert(OldEVLParam
&& "no EVL param to fold away");
430 LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam
<< '\n');
431 LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam
<< '\n');
433 // Convert the %evl predication into vector mask predication.
434 ElementCount ElemCount
= VPI
.getStaticVectorLength();
435 Value
*VLMask
= convertEVLToMask(Builder
, OldEVLParam
, ElemCount
);
436 Value
*NewMaskParam
= Builder
.CreateAnd(VLMask
, OldMaskParam
);
437 VPI
.setMaskParam(NewMaskParam
);
439 // Drop the %evl parameter.
440 discardEVLParameter(VPI
);
441 assert(VPI
.canIgnoreVectorLengthParam() &&
442 "transformation did not render the evl param ineffective!");
444 // Reassess the modified instruction.
448 Value
*CachingVPExpander::expandPredication(VPIntrinsic
&VPI
) {
449 LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI
<< '\n');
451 IRBuilder
<> Builder(&VPI
);
453 // Try lowering to a LLVM instruction first.
454 auto OC
= VPI
.getFunctionalOpcode();
456 if (OC
&& Instruction::isBinaryOp(*OC
))
457 return expandPredicationInBinaryOperator(Builder
, VPI
);
459 if (auto *VPRI
= dyn_cast
<VPReductionIntrinsic
>(&VPI
))
460 return expandPredicationInReduction(Builder
, *VPRI
);
465 //// } CachingVPExpander
// A worklist entry: one VP intrinsic together with the (remaining)
// legalization strategy to apply to it.
struct TransformJob {
  VPIntrinsic *PI;
  TargetTransformInfo::VPLegalization Strategy;
  TransformJob(VPIntrinsic *PI, TargetTransformInfo::VPLegalization InitStrat)
      : PI(PI), Strategy(InitStrat) {}

  // The job is complete once both strategies have been reset to Legal.
  bool isDone() const { return Strategy.shouldDoNothing(); }
};
476 void sanitizeStrategy(Instruction
&I
, VPLegalization
&LegalizeStrat
) {
477 // Speculatable instructions do not strictly need predication.
478 if (isSafeToSpeculativelyExecute(&I
)) {
479 // Converting a speculatable VP intrinsic means dropping %mask and %evl.
480 // No need to expand %evl into the %mask only to ignore that code.
481 if (LegalizeStrat
.OpStrategy
== VPLegalization::Convert
)
482 LegalizeStrat
.EVLParamStrategy
= VPLegalization::Discard
;
486 // We have to preserve the predicating effect of %evl for this
487 // non-speculatable VP intrinsic.
488 // 1) Never discard %evl.
489 // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
490 // %evl gets folded into %mask.
491 if ((LegalizeStrat
.EVLParamStrategy
== VPLegalization::Discard
) ||
492 (LegalizeStrat
.OpStrategy
== VPLegalization::Convert
)) {
493 LegalizeStrat
.EVLParamStrategy
= VPLegalization::Convert
;
498 CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic
&VPI
) const {
499 auto VPStrat
= TTI
.getVPLegalizationStrategy(VPI
);
500 if (LLVM_LIKELY(!UsingTTIOverrides
)) {
501 // No overrides - we are in production.
505 // Overrides set - we are in testing, the following does not need to be
507 VPStrat
.EVLParamStrategy
= parseOverrideOption(EVLTransformOverride
);
508 VPStrat
.OpStrategy
= parseOverrideOption(MaskTransformOverride
);
/// \brief Expand llvm.vp.* intrinsics as requested by \p TTI.
bool CachingVPExpander::expandVectorPredication() {
  SmallVector<TransformJob, 16> Worklist;

  // Collect all VPIntrinsics that need expansion and determine their expansion
  // strategy.
  for (auto &I : instructions(F)) {
    auto *VPI = dyn_cast<VPIntrinsic>(&I);
    if (!VPI)
      continue;
    auto VPStrat = getVPLegalizationStrategy(*VPI);
    sanitizeStrategy(I, VPStrat);
    if (!VPStrat.shouldDoNothing())
      Worklist.emplace_back(VPI, VPStrat);
  }
  if (Worklist.empty())
    return false;

  // Transform all VPIntrinsics on the worklist.
  LLVM_DEBUG(dbgs() << "\n:::: Transforming " << Worklist.size()
                    << " instructions ::::\n");
  for (TransformJob Job : Worklist) {
    // Transform the EVL parameter.
    switch (Job.Strategy.EVLParamStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      discardEVLParameter(*Job.PI);
      break;
    case VPLegalization::Convert:
      if (foldEVLIntoMask(*Job.PI))
        ++NumFoldedVL;
      break;
    }
    // %evl is handled now; mark it Legal so isDone() can hold below.
    Job.Strategy.EVLParamStrategy = VPLegalization::Legal;

    // Replace with a non-predicated operation.
    switch (Job.Strategy.OpStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      llvm_unreachable("Invalid strategy for operators.");
    case VPLegalization::Convert:
      expandPredication(*Job.PI);
      ++NumLoweredVPOps;
      break;
    }
    Job.Strategy.OpStrategy = VPLegalization::Legal;

    assert(Job.isDone() && "incomplete transformation");
  }

  return true;
}
/// Legacy pass-manager wrapper: runs CachingVPExpander over each function.
class ExpandVectorPredication : public FunctionPass {
public:
  static char ID;
  ExpandVectorPredication() : FunctionPass(ID) {
    initializeExpandVectorPredicationPass(*PassRegistry::getPassRegistry());
  }

  // \returns true if any VP intrinsic in \p F was expanded (IR changed).
  bool runOnFunction(Function &F) override {
    const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    CachingVPExpander VPExpander(F, *TTI);
    return VPExpander.expandVectorPredication();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // Expansion only rewrites instructions in place; no CFG edges change.
    AU.setPreservesCFG();
  }
};
// Pass registration boilerplate for the legacy pass manager.
char ExpandVectorPredication::ID;
INITIALIZE_PASS_BEGIN(ExpandVectorPredication, "expandvp",
                      "Expand vector predication intrinsics", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ExpandVectorPredication, "expandvp",
                    "Expand vector predication intrinsics", false, false)
// Factory function: create an instance of the legacy expansion pass.
FunctionPass *llvm::createExpandVectorPredicationPass() {
  return new ExpandVectorPredication();
}
599 ExpandVectorPredicationPass::run(Function
&F
, FunctionAnalysisManager
&AM
) {
600 const auto &TTI
= AM
.getResult
<TargetIRAnalysis
>(F
);
601 CachingVPExpander
VPExpander(F
, TTI
);
602 if (!VPExpander
.expandVectorPredication())
603 return PreservedAnalyses::all();
604 PreservedAnalyses PA
;
605 PA
.preserveSet
<CFGAnalyses
>();