//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
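//
// For example, if the target requests expansion, a VP integer add such as
//
//   %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                          <4 x i1> %m, i32 %evl)
//
// is rewritten into a plain 'add' instruction; for a speculatable operation
// like 'add' the %m and %evl parameters can simply be dropped.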
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

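// Map the textual override option to its VPLegalization::VPTransform value.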
static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of lowered vector predication operations");

///// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
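/// Only constant all-ones vectors are recognized here; any other mask value is
/// conservatively treated as not all-true.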
static bool isAllTrueMask(Value *MaskVal) {
  auto *ConstVec = dyn_cast<ConstantVector>(MaskVal);
  return ConstVec && ConstVec->isAllOnesValue();
}

/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  Function &F;
  const TargetTransformInfo &TTI;

  /// \returns A (fixed length) vector with ascending integer indices
  /// (<0, 1, ..., NumElems-1>).
  /// \p Builder
  ///    Used for instruction creation.
  /// \p LaneTy
  ///    Integer element type of the result vector.
  /// \p NumElems
  ///    Number of vector elements.
  Value *createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                          unsigned NumElems);

  /// \returns A bitmask that is true where the lane position is less-than \p
  /// EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  Value *foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation.
  void discardEVLParameter(VPIntrinsic &PI);

  /// \brief Lower this VP binary operator to an unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// \brief Lower this VP reduction to a call to an unpredicated reduction
  /// intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// \brief Query TTI and expand the vector predication in \p PI accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// \brief Determine how and whether the VPIntrinsic \p VPI shall be
  /// expanded. This overrides TTI with the cl::opts listed at the top of this
  /// file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(Function &F, const TargetTransformInfo &TTI)
      : F(F), TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

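  /// Expand all VP intrinsics in the function according to the picked
  /// legalization strategies. \returns True if the function was modified.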
  bool expandVectorPredication();
};

//// CachingVPExpander {

Value *CachingVPExpander::createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                                           unsigned NumElems) {
  // TODO add caching
  SmallVector<Constant *, 16> ConstElems;

  for (unsigned Idx = 0; Idx < NumElems; ++Idx)
    ConstElems.push_back(ConstantInt::get(LaneTy, Idx, false));

  return ConstantVector::get(ConstElems);
}

Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    auto *M = Builder.GetInsertBlock()->getModule();
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    Function *ActiveMaskFunc = Intrinsic::getDeclaration(
        M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
    // `get_active_lane_mask` performs an implicit less-than comparison.
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateCall(ActiveMaskFunc, {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
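  // Compare a step vector <0, 1, ..., NumElems-1> against the splatted %evl;
  // exactly the lanes below %evl compare unsigned-less-than and become true.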
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = createStepVector(Builder, LaneTy, NumElems);
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

Value *
CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                      VPIntrinsic &VPI) {
  assert((isSafeToSpeculativelyExecute(&VPI) ||
          VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor on masked-off lanes (1).
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // 2nd operand must not be zero.
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());

  replaceOperation(*NewBinOp, VPI);
  return NewBinOp;
}

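/// \returns The neutral element of the reduction \p VPI: the value that can be
/// blended into masked-off lanes without changing the reduction result.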
static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  bool Negative = false;
  unsigned EltBits = EltTy->getScalarSizeInBits();
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Expecting a VP reduction intrinsic");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor:
  case Intrinsic::vp_reduce_umax:
    return Constant::getNullValue(EltTy);
  case Intrinsic::vp_reduce_mul:
    return ConstantInt::get(EltTy, 1, /*IsSigned*/ false);
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_umin:
    return ConstantInt::getAllOnesValue(EltTy);
  case Intrinsic::vp_reduce_smin:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMaxValue(EltBits));
  case Intrinsic::vp_reduce_smax:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMinValue(EltBits));
  case Intrinsic::vp_reduce_fmax:
    Negative = true;
    LLVM_FALLTHROUGH;
  case Intrinsic::vp_reduce_fmin: {
    FastMathFlags Flags = VPI.getFastMathFlags();
    const fltSemantics &Semantics = EltTy->getFltSemantics();
    return !Flags.noNaNs() ? ConstantFP::getQNaN(EltTy, Negative)
           : !Flags.noInfs()
               ? ConstantFP::getInfinity(EltTy, Negative)
               : ConstantFP::get(EltTy,
                                 APFloat::getLargest(Semantics, Negative));
  }
  case Intrinsic::vp_reduce_fadd:
    return ConstantFP::getNegativeZero(EltTy);
  case Intrinsic::vp_reduce_fmul:
    return ConstantFP::get(EltTy, 1.0);
  }
}

Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert((isSafeToSpeculativelyExecute(&VPI) ||
          VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert neutral element in masked-out positions.
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

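  // Reduce the (possibly blended) vector operand with an unpredicated
  // reduction intrinsic, then combine the scalar result with the start value.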
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
    Reduction = Builder.CreateAddReduce(RedOp);
    Reduction = Builder.CreateAdd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_mul:
    Reduction = Builder.CreateMulReduce(RedOp);
    Reduction = Builder.CreateMul(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_and:
    Reduction = Builder.CreateAndReduce(RedOp);
    Reduction = Builder.CreateAnd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_or:
    Reduction = Builder.CreateOrReduce(RedOp);
    Reduction = Builder.CreateOr(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_xor:
    Reduction = Builder.CreateXorReduce(RedOp);
    Reduction = Builder.CreateXor(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmax:
    Reduction = Builder.CreateFPMaxReduce(RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmin:
    Reduction = Builder.CreateFPMinReduce(RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::minnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}

void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
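  // The maximum EVL is the static vector length: a compile-time constant for
  // fixed-width vectors, or vscale * known-min-lanes for scalable vectors.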
  if (StaticElemCount.isScalable()) {
    // TODO add caching
    auto *M = VPI.getModule();
    Function *VScaleFunc =
        Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
    MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                               /*NUW*/ true, /*NSW*/ false);
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
}

Value *CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Ineffective %evl parameter - nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return &VPI;

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return &VPI;
}

Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  return &VPI;
}

//// } CachingVPExpander

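// A VP intrinsic on the worklist together with the legalization strategy that
// still has to be applied to it.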
struct TransformJob {
  VPIntrinsic *PI;
  TargetTransformInfo::VPLegalization Strategy;
  TransformJob(VPIntrinsic *PI, TargetTransformInfo::VPLegalization InitStrat)
      : PI(PI), Strategy(InitStrat) {}

  bool isDone() const { return Strategy.shouldDoNothing(); }
};

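/// Adjust \p LegalizeStrat for \p I: speculatable instructions may simply drop
/// %evl, while non-speculatable ones must fold %evl into %mask instead.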
void sanitizeStrategy(Instruction &I, VPLegalization &LegalizeStrat) {
  // Speculatable instructions do not strictly need predication.
  if (isSafeToSpeculativelyExecute(&I)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // No need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing, the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

/// \brief Expand llvm.vp.* intrinsics as requested by \p TTI.
bool CachingVPExpander::expandVectorPredication() {
  SmallVector<TransformJob, 16> Worklist;

  // Collect all VPIntrinsics that need expansion and determine their expansion
  // strategy.
  for (auto &I : instructions(F)) {
    auto *VPI = dyn_cast<VPIntrinsic>(&I);
    if (!VPI)
      continue;
    auto VPStrat = getVPLegalizationStrategy(*VPI);
    sanitizeStrategy(I, VPStrat);
    if (!VPStrat.shouldDoNothing())
      Worklist.emplace_back(VPI, VPStrat);
  }
  if (Worklist.empty())
    return false;

  // Transform all VPIntrinsics on the worklist.
  LLVM_DEBUG(dbgs() << "\n:::: Transforming " << Worklist.size()
                    << " instructions ::::\n");
  for (TransformJob Job : Worklist) {
    // Transform the EVL parameter.
    switch (Job.Strategy.EVLParamStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      discardEVLParameter(*Job.PI);
      break;
    case VPLegalization::Convert:
      if (foldEVLIntoMask(*Job.PI))
        ++NumFoldedVL;
      break;
    }
    Job.Strategy.EVLParamStrategy = VPLegalization::Legal;

    // Replace with a non-predicated operation.
    switch (Job.Strategy.OpStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      llvm_unreachable("Invalid strategy for operators.");
    case VPLegalization::Convert:
      expandPredication(*Job.PI);
      ++NumLoweredVPOps;
      break;
    }
    Job.Strategy.OpStrategy = VPLegalization::Legal;

    assert(Job.isDone() && "incomplete transformation");
  }

  return true;
}

class ExpandVectorPredication : public FunctionPass {
public:
  static char ID;
  ExpandVectorPredication() : FunctionPass(ID) {
    initializeExpandVectorPredicationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    CachingVPExpander VPExpander(F, *TTI);
    return VPExpander.expandVectorPredication();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
} // namespace

char ExpandVectorPredication::ID;
INITIALIZE_PASS_BEGIN(ExpandVectorPredication, "expandvp",
                      "Expand vector predication intrinsics", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ExpandVectorPredication, "expandvp",
                    "Expand vector predication intrinsics", false, false)

FunctionPass *llvm::createExpandVectorPredicationPass() {
  return new ExpandVectorPredication();
}

PreservedAnalyses
ExpandVectorPredicationPass::run(Function &F, FunctionAnalysisManager &AM) {
  const auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  CachingVPExpander VPExpander(F, TTI);
  if (!VPExpander.expandVectorPredication())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}