//===- TpiStream.cpp - PDB Type Info (TPI) Stream 2 Access ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/PDB/Native/TpiStream.h"

#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
#include "llvm/DebugInfo/CodeView/RecordName.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecordHelpers.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/Hash.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
#include <vector>

using namespace llvm;
using namespace llvm::codeview;
using namespace llvm::support;
using namespace llvm::msf;
using namespace llvm::pdb;

TpiStream::TpiStream(PDBFile &File, std::unique_ptr<MappedBlockStream> Stream)
    : Pdb(File), Stream(std::move(Stream)) {}

TpiStream::~TpiStream() = default;
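
// Parse the TPI stream: validate the stream header, read the raw type record
// substream, and, if one is present, the companion hash stream that holds the
// per-record hash values, the type index offsets, and the hash adjusters.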
Error TpiStream::reload() {
  BinaryStreamReader Reader(*Stream);
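
  // Sanity-check the header fields before trusting any of the sizes and
  // offsets they describe.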
  if (Reader.bytesRemaining() < sizeof(TpiStreamHeader))
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "TPI Stream does not contain a header.");

  if (Reader.readObject(Header))
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "TPI Stream does not contain a header.");

  if (Header->Version != PdbTpiV80)
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "Unsupported TPI Version.");

  if (Header->HeaderSize != sizeof(TpiStreamHeader))
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "Corrupt TPI Header size.");

  if (Header->HashKeySize != sizeof(ulittle32_t))
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "TPI Stream expected 4 byte hash key size.");

  if (Header->NumHashBuckets < MinTpiHashBuckets ||
      Header->NumHashBuckets > MaxTpiHashBuckets)
    return make_error<RawError>(raw_error_code::corrupt_file,
                                "TPI Stream Invalid number of hash buckets.");

  // The actual type records themselves come from this stream
  if (auto EC =
          Reader.readSubstream(TypeRecordsSubstream, Header->TypeRecordBytes))
    return EC;

  BinaryStreamReader RecordReader(TypeRecordsSubstream.StreamData);
  if (auto EC =
          RecordReader.readArray(TypeRecords, TypeRecordsSubstream.size()))
    return EC;

  // Hash indices, hash values, etc come from the hash stream.
  if (Header->HashStreamIndex != kInvalidStreamIndex) {
    auto HS = Pdb.safelyCreateIndexedStream(Header->HashStreamIndex);
    if (!HS) {
      consumeError(HS.takeError());
      return make_error<RawError>(raw_error_code::corrupt_file,
                                  "Invalid TPI hash stream index.");
    }
    BinaryStreamReader HSR(**HS);

    // There should be a hash value for every type record, or no hashes at all.
    uint32_t NumHashValues =
        Header->HashValueBuffer.Length / sizeof(ulittle32_t);
    if (NumHashValues != getNumTypeRecords() && NumHashValues != 0)
      return make_error<RawError>(
          raw_error_code::corrupt_file,
          "TPI hash count does not match with the number of type records.");
    HSR.setOffset(Header->HashValueBuffer.Off);
    if (auto EC = HSR.readArray(HashValues, NumHashValues))
      return EC;
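
    // The index offset buffer holds (TypeIndex, Offset) pairs sampled at
    // intervals through the record substream, allowing consumers to seek to a
    // given type record without scanning from the beginning.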
    HSR.setOffset(Header->IndexOffsetBuffer.Off);
    uint32_t NumTypeIndexOffsets =
        Header->IndexOffsetBuffer.Length / sizeof(TypeIndexOffset);
    if (auto EC = HSR.readArray(TypeIndexOffsets, NumTypeIndexOffsets))
      return EC;
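
    // Hash adjusters (when present) override the default bucket placement for
    // particular records; the buffer is optional, so only load it if non-empty.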
    if (Header->HashAdjBuffer.Length > 0) {
      HSR.setOffset(Header->HashAdjBuffer.Off);
      if (auto EC = HashAdjusters.load(HSR))
        return EC;
    }

    HashStream = std::move(*HS);
  }
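
  // Wrap the raw records in a LazyRandomTypeCollection so that individual
  // records can be materialized on demand, using the index offsets read above
  // for fast random access.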
  Types = std::make_unique<LazyRandomTypeCollection>(
      TypeRecords, getNumTypeRecords(), getTypeIndexOffsets());
  return Error::success();
}

PdbRaw_TpiVer TpiStream::getTpiVersion() const {
  uint32_t Value = Header->Version;
  return static_cast<PdbRaw_TpiVer>(Value);
}

uint32_t TpiStream::TypeIndexBegin() const { return Header->TypeIndexBegin; }

uint32_t TpiStream::TypeIndexEnd() const { return Header->TypeIndexEnd; }

uint32_t TpiStream::getNumTypeRecords() const {
  return TypeIndexEnd() - TypeIndexBegin();
}

uint16_t TpiStream::getTypeHashStreamIndex() const {
  return Header->HashStreamIndex;
}

uint16_t TpiStream::getTypeHashStreamAuxIndex() const {
  return Header->HashAuxStreamIndex;
}

uint32_t TpiStream::getNumHashBuckets() const { return Header->NumHashBuckets; }
uint32_t TpiStream::getHashKeySize() const { return Header->HashKeySize; }
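
// Lazily build an in-memory map from hash bucket to the type indices that fall
// into it. Doing this eagerly would mean walking every hash value even when no
// lookups are ever performed.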
void TpiStream::buildHashMap() {
  if (!HashMap.empty())
    return;
  if (HashValues.empty())
    return;

  HashMap.resize(Header->NumHashBuckets);

  TypeIndex TIB{Header->TypeIndexBegin};
  TypeIndex TIE{Header->TypeIndexEnd};
  while (TIB < TIE) {
    uint32_t HV = HashValues[TIB.toArrayIndex()];
    HashMap[HV].push_back(TIB++);
  }
}
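
// Return the indices of all type records whose computed name matches Name.
// The name is hashed with the same V1 string hash the TPI stream uses, so only
// records in the resulting bucket need to be compared by full name.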
std::vector<TypeIndex> TpiStream::findRecordsByName(StringRef Name) const {
  if (!supportsTypeLookup())
    const_cast<TpiStream*>(this)->buildHashMap();

  uint32_t Bucket = hashStringV1(Name) % Header->NumHashBuckets;
  if (Bucket > HashMap.size())
    return {};

  std::vector<TypeIndex> Result;
  for (TypeIndex TI : HashMap[Bucket]) {
    std::string ThisName = computeTypeName(*Types, TI);
    if (ThisName == Name)
      Result.push_back(TI);
  }
  return Result;
}

bool TpiStream::supportsTypeLookup() const { return !HashMap.empty(); }
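
// Given the index of a UDT forward reference, try to find the index of the
// corresponding full definition by hashing the tag record and comparing names
// (or unique names, when available) within its hash bucket. If no definition
// is found, the forward reference's own index is returned.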
Expected<TypeIndex>
TpiStream::findFullDeclForForwardRef(TypeIndex ForwardRefTI) const {
  if (!supportsTypeLookup())
    const_cast<TpiStream*>(this)->buildHashMap();

  CVType F = Types->getType(ForwardRefTI);
  if (!isUdtForwardRef(F))
    return ForwardRefTI;

  Expected<TagRecordHash> ForwardTRH = hashTagRecord(F);
  if (!ForwardTRH)
    return ForwardTRH.takeError();

  uint32_t BucketIdx = ForwardTRH->FullRecordHash % Header->NumHashBuckets;

  for (TypeIndex TI : HashMap[BucketIdx]) {
    CVType CVT = Types->getType(TI);
    if (CVT.kind() != F.kind())
      continue;

    Expected<TagRecordHash> FullTRH = hashTagRecord(CVT);
    if (!FullTRH)
      return FullTRH.takeError();
    if (ForwardTRH->FullRecordHash != FullTRH->FullRecordHash)
      continue;
    TagRecord &ForwardTR = ForwardTRH->getRecord();
    TagRecord &FullTR = FullTRH->getRecord();

    if (!ForwardTR.hasUniqueName()) {
      if (ForwardTR.getName() == FullTR.getName())
        return TI;
      continue;
    }

    if (!FullTR.hasUniqueName())
      continue;
    if (ForwardTR.getUniqueName() == FullTR.getUniqueName())
      return TI;
  }
  return ForwardRefTI;
}
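
// Simple (built-in) type indices have no record in the TPI stream, so callers
// must handle them before asking for the record.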
codeview::CVType TpiStream::getType(codeview::TypeIndex Index) {
  assert(!Index.isSimple());
  return Types->getType(Index);
}

BinarySubstreamRef TpiStream::getTypeRecordsSubstream() const {
  return TypeRecordsSubstream;
}

FixedStreamArray<support::ulittle32_t> TpiStream::getHashValues() const {
  return HashValues;
}

FixedStreamArray<TypeIndexOffset> TpiStream::getTypeIndexOffsets() const {
  return TypeIndexOffsets;
}

HashTable<support::ulittle32_t> &TpiStream::getHashAdjusters() {
  return HashAdjusters;
}

CVTypeRange TpiStream::types(bool *HadError) const {
  return make_range(TypeRecords.begin(HadError), TypeRecords.end());
}
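
// TpiStream is a read-only view; writing the TPI stream is handled by
// TpiStreamBuilder, so there is nothing to commit here.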
Error TpiStream::commit() { return Error::success(); }