//===-- Operator.cpp - Implement the LLVM operators ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

#include "ConstantsContext.h"
namespace llvm {
Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    int64_t Offset = 1;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct indices are always constant, so OpC is known to be non-null.
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, we take 1 because it is the index that
      // gives the worst alignment of the offset.
      int64_t ElemCount = 1;
      if (OpC)
        ElemCount = OpC->getZExtValue();
      Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}
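
// A worked example of the computation above (an illustrative sketch, not part
// of the upstream file). For `getelementptr {i32, i8}, ptr %p, i64 %n, i32 1`
// on a typical 64-bit DataLayout: the first level indexes an 8-byte struct
// with an unknown index, so its worst-case offset is 8 * 1 = 8; the second
// level is the constant field offset 4. The maximum alignment known to be
// preserved is therefore MinAlign(4, MinAlign(8, MaximumAlignment)) == 4.
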
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(value_op_begin() + 1, value_op_end());
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}
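
// Example usage of the instance overload (an illustrative sketch; `GEP` and
// `DL` are assumed to be a `const GEPOperator *` and the module's
// `DataLayout`). The ExternalAnalysis parameter defaults to nullptr, so a
// plain call succeeds only when every index is a ConstantInt:
//
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the total byte offset of the GEP.
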
bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // The external analysis may return a value outside the range its
      // operand can represent, so detect signed overflow/underflow when
      // scaling and accumulating.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = false;
    if (isa<ScalableVectorType>(GTI.getIndexedType()))
      ScalableType = true;

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType())))
        return false;
      continue;
    }

    // The operand is not constant; check if an external analysis was provided.
    // External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex,
                          DL.getTypeAllocSize(GTI.getIndexedType())))
      return false;
  }
  return true;
}
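
// Sketch of how a caller might supply the ExternalAnalysis hook (a
// hypothetical caller, not upstream code): the callback gets a chance to
// resolve a non-constant index to a known APInt value.
//
//   auto Analysis = [](Value &V, APInt &Result) -> bool {
//     // Hypothetical: fill Result and return true if V's value is known.
//     return false;
//   };
//   APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   bool Known = GEP->accumulateConstantOffset(DL, Off, Analysis);
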
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            DL.getTypeAllocSize(GTI.getIndexedType()));
      continue;
    }

    if (STy || ScalableType)
      return false;
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    VariableOffsets.insert({V, APInt(BitWidth, 0)});
    APInt IndexedSize =
        APInt(BitWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    VariableOffsets[V] += IndexedSize;
  }
  return true;
}
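
// Example usage (an illustrative sketch; the local names are hypothetical):
// decompose a GEP into a constant byte offset plus a per-variable byte scale,
// i.e. total offset == ConstOff + sum over (V, Scale) of V * Scale.
//
//   unsigned BW = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
//   MapVector<Value *, APInt> VarOffsets;
//   APInt ConstOff(BW, 0);
//   if (GEP->collectOffset(DL, BW, VarOffsets, ConstOff))
//     for (auto &[V, Scale] : VarOffsets)
//       ; // Scale is the byte multiplier for index value V.
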
} // namespace llvm