//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists on constant expressions only.
    return GEP->isInBounds() || GEP->getInRangeIndex() != std::nullopt;
  }
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}
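
// E.g. hasPoisonGeneratingFlags() is true for `add nuw i32 %x, %y` (dropping
// nuw changes which inputs produce poison) and false for a plain `add` with
// no flags set.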

bool Operator::hasPoisonGeneratingFlagsOrMetadata() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && I->hasPoisonGeneratingMetadata();
}
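
// E.g. metadata such as !nonnull or !range on a load is poison-generating:
// a violating value yields poison, so it is covered here alongside the flags.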

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, we take 1 because it is the index that will
      // give the worst alignment of the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
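
// Worked example (assuming a typical 64-bit data layout): for
//   %g = getelementptr {i8, i64}, ptr %p, i64 %i, i32 1
// the variable index contributes a worst-case offset of 16 (one 16-byte
// struct element, taking ElemCount = 1) and the field index contributes 8,
// so MinAlign yields Align(8): the GEP preserves at most 8-byte alignment.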

bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}
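
// Typical call pattern (illustrative):
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the GEP's total byte offset.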

bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // External analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType())))
        return false;
      continue;
    }

    // The operand is not constant; check if an external analysis was provided.
    // External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex,
                          DL.getTypeAllocSize(GTI.getIndexedType())))
      return false;
  }
  return true;
}
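
// The ExternalAnalysis hook lets a caller supply values for non-constant
// indices it can reason about itself; e.g. (hypothetical callback) treating
// a given index value as a known constant:
//   auto Analysis = [](Value &V, APInt &Out) {
//     Out = APInt(64, 4); // pretend V is known to be 4
//     return true;
//   };
//   GEP->accumulateConstantOffset(DL, Offset, Analysis);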

bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
      continue;
    }

    if (STy || ScalableType)
      return false;
    APInt IndexedSize =
        APInt(BitWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      VariableOffsets.insert({V, APInt(BitWidth, 0)});
      VariableOffsets[V] += IndexedSize;
    }
  }
  return true;
}
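
// On success the GEP's byte offset decomposes as (illustrative use):
//   MapVector<Value *, APInt> VarOffsets;
//   APInt ConstOffset(BitWidth, 0);
//   if (GEP->collectOffset(DL, BitWidth, VarOffsets, ConstOffset))
//     ; // offset == ConstOffset + sum of V * Scale over (V, Scale) in
//       // VarOffsets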

void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
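
// E.g. a value with only nnan and nsz set prints " nnan nsz"; when every
// flag is set, the single " fast" shorthand is printed instead.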
} // namespace llvm