//===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #include "VPlanUtils.h"
10 #include "VPlanPatternMatch.h"
11 #include "llvm/ADT/TypeSwitch.h"
12 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
16 bool vputils::onlyFirstLaneUsed(const VPValue
*Def
) {
17 return all_of(Def
->users(),
18 [Def
](const VPUser
*U
) { return U
->onlyFirstLaneUsed(Def
); });
21 bool vputils::onlyFirstPartUsed(const VPValue
*Def
) {
22 return all_of(Def
->users(),
23 [Def
](const VPUser
*U
) { return U
->onlyFirstPartUsed(Def
); });
26 VPValue
*vputils::getOrCreateVPValueForSCEVExpr(VPlan
&Plan
, const SCEV
*Expr
,
27 ScalarEvolution
&SE
) {
28 if (auto *Expanded
= Plan
.getSCEVExpansion(Expr
))
30 VPValue
*Expanded
= nullptr;
31 if (auto *E
= dyn_cast
<SCEVConstant
>(Expr
))
32 Expanded
= Plan
.getOrAddLiveIn(E
->getValue());
33 else if (auto *E
= dyn_cast
<SCEVUnknown
>(Expr
))
34 Expanded
= Plan
.getOrAddLiveIn(E
->getValue());
36 Expanded
= new VPExpandSCEVRecipe(Expr
, SE
);
37 Plan
.getEntry()->appendRecipe(Expanded
->getDefiningRecipe());
39 Plan
.addSCEVExpansion(Expr
, Expanded
);
43 bool vputils::isHeaderMask(const VPValue
*V
, VPlan
&Plan
) {
44 if (isa
<VPActiveLaneMaskPHIRecipe
>(V
))
47 auto IsWideCanonicalIV
= [](VPValue
*A
) {
48 return isa
<VPWidenCanonicalIVRecipe
>(A
) ||
49 (isa
<VPWidenIntOrFpInductionRecipe
>(A
) &&
50 cast
<VPWidenIntOrFpInductionRecipe
>(A
)->isCanonical());
54 using namespace VPlanPatternMatch
;
56 if (match(V
, m_ActiveLaneMask(m_VPValue(A
), m_VPValue(B
))))
57 return B
== Plan
.getTripCount() &&
58 (match(A
, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1))) ||
59 IsWideCanonicalIV(A
));
61 return match(V
, m_Binary
<Instruction::ICmp
>(m_VPValue(A
), m_VPValue(B
))) &&
62 IsWideCanonicalIV(A
) && B
== Plan
.getOrCreateBackedgeTakenCount();
65 const SCEV
*vputils::getSCEVExprForVPValue(VPValue
*V
, ScalarEvolution
&SE
) {
67 return SE
.getSCEV(V
->getLiveInIRValue());
69 // TODO: Support constructing SCEVs for more recipes as needed.
70 return TypeSwitch
<const VPRecipeBase
*, const SCEV
*>(V
->getDefiningRecipe())
71 .Case
<VPExpandSCEVRecipe
>(
72 [](const VPExpandSCEVRecipe
*R
) { return R
->getSCEV(); })
73 .Default([&SE
](const VPRecipeBase
*) { return SE
.getCouldNotCompute(); });
76 bool vputils::isUniformAcrossVFsAndUFs(VPValue
*V
) {
77 using namespace VPlanPatternMatch
;
78 // Live-ins are uniform.
82 VPRecipeBase
*R
= V
->getDefiningRecipe();
83 if (R
&& V
->isDefinedOutsideLoopRegions()) {
84 if (match(V
->getDefiningRecipe(),
85 m_VPInstruction
<VPInstruction::CanonicalIVIncrementForPart
>(
88 return all_of(R
->operands(),
89 [](VPValue
*Op
) { return isUniformAcrossVFsAndUFs(Op
); });
92 auto *CanonicalIV
= R
->getParent()->getPlan()->getCanonicalIV();
93 // Canonical IV chain is uniform.
94 if (V
== CanonicalIV
|| V
== CanonicalIV
->getBackedgeValue())
97 return TypeSwitch
<const VPRecipeBase
*, bool>(R
)
98 .Case
<VPDerivedIVRecipe
>([](const auto *R
) { return true; })
99 .Case
<VPReplicateRecipe
>([](const auto *R
) {
100 // Loads and stores that are uniform across VF lanes are handled by
101 // VPReplicateRecipe.IsUniform. They are also uniform across UF parts if
102 // all their operands are invariant.
103 // TODO: Further relax the restrictions.
104 return R
->isUniform() &&
105 (isa
<LoadInst
, StoreInst
>(R
->getUnderlyingValue())) &&
106 all_of(R
->operands(),
107 [](VPValue
*Op
) { return isUniformAcrossVFsAndUFs(Op
); });
109 .Case
<VPScalarCastRecipe
, VPWidenCastRecipe
>([](const auto *R
) {
110 // A cast is uniform according to its operand.
111 return isUniformAcrossVFsAndUFs(R
->getOperand(0));
113 .Default([](const VPRecipeBase
*) { // A value is considered non-uniform
114 // unless proven otherwise.