//===- MappedBlockStream.cpp - Reads stream data from an MSF file ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::msf;
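
// MappedBlockStreamImpl, defined in the anonymous namespace below, is a thin
// wrapper that simply forwards its constructor arguments to the wrapped stream
// type.  It exists so the create* factory functions can allocate concrete
// MappedBlockStream / WritableMappedBlockStream objects with llvm::make_unique
// even though those types are not meant to be constructed directly.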
namespace {

template <typename Base> class MappedBlockStreamImpl : public Base {
public:
  template <typename... Args>
  MappedBlockStreamImpl(Args &&... Params)
      : Base(std::forward<Args>(Params)...) {}
};

} // end anonymous namespace

using Interval = std::pair<uint32_t, uint32_t>;
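
// Intervals are (start, end) pairs of stream offsets, where end = start + size.
// intersect() returns the common subrange of two overlapping intervals, e.g.
// intersect({10, 20}, {15, 30}) == {15, 20}.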
static Interval intersect(const Interval &I1, const Interval &I2) {
  return std::make_pair(std::max(I1.first, I2.first),
                        std::min(I1.second, I2.second));
}

MappedBlockStream::MappedBlockStream(uint32_t BlockSize,
                                     const MSFStreamLayout &Layout,
                                     BinaryStreamRef MsfData,
                                     BumpPtrAllocator &Allocator)
    : BlockSize(BlockSize), StreamLayout(Layout), MsfData(MsfData),
      Allocator(Allocator) {}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout, BinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
    const MSFLayout &Layout, BinaryStreamRef MsfData, uint32_t StreamIndex,
    BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return llvm::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      Layout.SB->BlockSize, SL, MsfData, Allocator);
}
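
// Example (illustrative sketch): given an MSFLayout and a BinaryStreamRef over
// the raw file contents, a caller could materialize stream N and walk it with
// a BinaryStreamReader:
//   auto S = MappedBlockStream::createIndexedStream(Layout, MsfData, N, Alloc);
//   BinaryStreamReader Reader(*S);
// What a given stream index means is defined by the enclosing file format
// (e.g. PDB), not by this class.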

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createDirectoryStream(const MSFLayout &Layout,
                                         BinaryStreamRef MsfData,
                                         BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                   BinaryStreamRef MsfData,
                                   BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL(getFpmStreamLayout(Layout));
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}
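
// readBytes() below tries, in order: (1) handing back a zero-copy reference
// directly into MsfData when the requested range lives in physically
// contiguous blocks, (2) satisfying the request from a previously cached
// allocation that covers it, and (3) allocating a pool buffer, copying the
// data into it block by block, caching that buffer, and returning a reference
// to the copy.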
Error MappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
                                   ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Size))
    return EC;

  if (tryReadContiguously(Offset, Size, Buffer))
    return Error::success();

  auto CacheIter = CacheMap.find(Offset);
  if (CacheIter != CacheMap.end()) {
    // Try to find an alloc that was large enough for this request.
    for (auto &Entry : CacheIter->second) {
      if (Entry.size() >= Size) {
        Buffer = Entry.slice(0, Size);
        return Error::success();
      }
    }
  }

  // We couldn't find a buffer that started at the correct offset (the most
  // common scenario).  Try to see if there is a buffer that starts at some
  // other offset but overlaps the desired range.
  for (auto &CacheItem : CacheMap) {
    Interval RequestExtent = std::make_pair(Offset, Offset + Size);

    // We already checked this one on the fast path above.
    if (CacheItem.first == Offset)
      continue;

    // If the initial extent of the cached item is beyond the ending extent
    // of the request, there is no overlap.
    if (CacheItem.first >= Offset + Size)
      continue;

    // We really only have to check the last item in the list, since we append
    // in order of increasing length.
    if (CacheItem.second.empty())
      continue;

    auto CachedAlloc = CacheItem.second.back();
    // If the initial extent of the request is beyond the ending extent of
    // the cached item, there is no overlap.
    Interval CachedExtent =
        std::make_pair(CacheItem.first, CacheItem.first + CachedAlloc.size());
    if (RequestExtent.first >= CachedExtent.second)
      continue;

    Interval Intersection = intersect(CachedExtent, RequestExtent);
    // Only use this if the entire request extent is contained in the cached
    // extent.
    if (Intersection != RequestExtent)
      continue;

    uint32_t CacheRangeOffset =
        AbsoluteDifference(CachedExtent.first, Intersection.first);
    Buffer = CachedAlloc.slice(CacheRangeOffset, Size);
    return Error::success();
  }

  // Otherwise allocate a large enough buffer in the pool, memcpy the data
  // into it, and return an ArrayRef to that.  Do not touch existing pool
  // allocations, as existing clients may be holding a pointer which must
  // not be invalidated.
  uint8_t *WriteBuffer = static_cast<uint8_t *>(Allocator.Allocate(Size, 8));
  if (auto EC = readBytes(Offset, MutableArrayRef<uint8_t>(WriteBuffer, Size)))
    return EC;

  if (CacheIter != CacheMap.end()) {
    CacheIter->second.emplace_back(WriteBuffer, Size);
  } else {
    std::vector<CacheEntry> List;
    List.emplace_back(WriteBuffer, Size);
    CacheMap.insert(std::make_pair(Offset, List));
  }
  Buffer = ArrayRef<uint8_t>(WriteBuffer, Size);
  return Error::success();
}
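
// Returns a zero-copy view starting at Offset that extends across as many
// physically consecutive blocks as the stream layout allows.  Only the first
// block is fetched through MsfData.readBytes(); the resulting ArrayRef is then
// widened to ByteSpan bytes, which assumes the bytes backing MsfData are
// contiguous in memory beyond that first block.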
Error MappedBlockStream::readLongestContiguousChunk(uint32_t Offset,
                                                    ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, 1))
    return EC;

  uint32_t First = Offset / BlockSize;
  uint32_t Last = First;

  while (Last < getNumBlocks() - 1) {
    if (StreamLayout.Blocks[Last] != StreamLayout.Blocks[Last + 1] - 1)
      break;
    ++Last;
  }

  uint32_t OffsetInFirstBlock = Offset % BlockSize;
  uint32_t BytesFromFirstBlock = BlockSize - OffsetInFirstBlock;
  uint32_t BlockSpan = Last - First + 1;
  uint32_t ByteSpan = BytesFromFirstBlock + (BlockSpan - 1) * BlockSize;

  ArrayRef<uint8_t> BlockData;
  uint32_t MsfOffset = blockToOffset(StreamLayout.Blocks[First], BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
    return EC;

  BlockData = BlockData.drop_front(OffsetInFirstBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), ByteSpan);
  return Error::success();
}

uint32_t MappedBlockStream::getLength() { return StreamLayout.Length; }

bool MappedBlockStream::tryReadContiguously(uint32_t Offset, uint32_t Size,
                                            ArrayRef<uint8_t> &Buffer) {
  if (Size == 0) {
    Buffer = ArrayRef<uint8_t>();
    return true;
  }
  // Attempt to fulfill the request with a reference directly into the stream.
  // This can work even if the request crosses a block boundary, provided that
  // all subsequent blocks are contiguous.  For example, a 10k read with a 4k
  // block size can be filled with a reference if, from the starting offset,
  // 3 blocks in a row are contiguous.
  uint32_t BlockNum = Offset / BlockSize;
  uint32_t OffsetInBlock = Offset % BlockSize;
  uint32_t BytesFromFirstBlock = std::min(Size, BlockSize - OffsetInBlock);
  uint32_t NumAdditionalBlocks =
      alignTo(Size - BytesFromFirstBlock, BlockSize) / BlockSize;
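
  // Worked example (illustrative numbers): Offset = 10000, Size = 10240, and
  // BlockSize = 4096 give BlockNum = 2, OffsetInBlock = 1808,
  // BytesFromFirstBlock = 2288, and NumAdditionalBlocks =
  // alignTo(7952, 4096) / 4096 = 2, so three consecutive entries of
  // StreamLayout.Blocks must hold consecutive block numbers for the fast path
  // to succeed.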

  uint32_t RequiredContiguousBlocks = NumAdditionalBlocks + 1;
  uint32_t E = StreamLayout.Blocks[BlockNum];
  for (uint32_t I = 0; I < RequiredContiguousBlocks; ++I, ++E) {
    if (StreamLayout.Blocks[I + BlockNum] != E)
      return false;
  }

  // Read out the entire block where the requested offset starts.  Then drop
  // bytes from the beginning so that the actual starting byte lines up with
  // the requested starting byte.  Then, since we know this is a contiguous
  // cross-block span, explicitly resize the ArrayRef to cover the entire
  // request length.
  ArrayRef<uint8_t> BlockData;
  uint32_t FirstBlockAddr = StreamLayout.Blocks[BlockNum];
  uint32_t MsfOffset = blockToOffset(FirstBlockAddr, BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData)) {
    consumeError(std::move(EC));
    return false;
  }
  BlockData = BlockData.drop_front(OffsetInBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), Size);
  return true;
}
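
// The copying fallback: walks the requested range block by block, translating
// each block number to an absolute MSF offset and memcpy'ing the relevant
// slice of each block into the caller-supplied buffer.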
Error MappedBlockStream::readBytes(uint32_t Offset,
                                   MutableArrayRef<uint8_t> Buffer) {
  uint32_t BlockNum = Offset / BlockSize;
  uint32_t OffsetInBlock = Offset % BlockSize;

  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Buffer.size()))
    return EC;

  uint32_t BytesLeft = Buffer.size();
  uint32_t BytesWritten = 0;
  uint8_t *WriteBuffer = Buffer.data();
  while (BytesLeft > 0) {
    uint32_t StreamBlockAddr = StreamLayout.Blocks[BlockNum];

    ArrayRef<uint8_t> BlockData;
    uint32_t MsfOffset = blockToOffset(StreamBlockAddr, BlockSize);
    if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
      return EC;

    const uint8_t *ChunkStart = BlockData.data() + OffsetInBlock;
    uint32_t BytesInChunk = std::min(BytesLeft, BlockSize - OffsetInBlock);
    ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk);

    BytesWritten += BytesInChunk;
    BytesLeft -= BytesInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  return Error::success();
}

void MappedBlockStream::invalidateCache() { CacheMap.shrink_and_clear(); }
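
// Example of the offset math below: an 8-byte write at stream offset 100
// against a 16-byte cache entry keyed at offset 96 intersects on [100, 108),
// giving Length = 8, SrcOffset = 0, and DestOffset = 4, so bytes [4, 12) of
// the cached allocation are refreshed in place.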
void MappedBlockStream::fixCacheAfterWrite(uint32_t Offset,
                                           ArrayRef<uint8_t> Data) const {
  // If this write overlapped a read which previously came from the pool,
  // someone may still be holding a pointer to that alloc which is now invalid.
  // Compute the overlapping range and update the cache entry, so any
  // outstanding buffers are automatically updated.
  for (const auto &MapEntry : CacheMap) {
    // If the end of the written extent precedes the beginning of the cached
    // extent, ignore this map entry.
    if (Offset + Data.size() < MapEntry.first)
      continue;
    for (const auto &Alloc : MapEntry.second) {
      // If the end of the cached extent precedes the beginning of the written
      // extent, ignore this alloc.
      if (MapEntry.first + Alloc.size() < Offset)
        continue;

      // If we get here, they are guaranteed to overlap.
      Interval WriteInterval = std::make_pair(Offset, Offset + Data.size());
      Interval CachedInterval =
          std::make_pair(MapEntry.first, MapEntry.first + Alloc.size());
      // If they overlap, we need to write the new data into the overlapping
      // range.
      auto Intersection = intersect(WriteInterval, CachedInterval);
      assert(Intersection.first <= Intersection.second);

      uint32_t Length = Intersection.second - Intersection.first;
      uint32_t SrcOffset =
          AbsoluteDifference(WriteInterval.first, Intersection.first);
      uint32_t DestOffset =
          AbsoluteDifference(CachedInterval.first, Intersection.first);
      ::memcpy(Alloc.data() + DestOffset, Data.data() + SrcOffset, Length);
    }
  }
}
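
// WritableMappedBlockStream composes a read interface (a MappedBlockStream
// over the same data, providing the cached, block-remapped read path) with a
// write interface (a WritableBinaryStreamRef used for the raw writes).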
WritableMappedBlockStream::WritableMappedBlockStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout,
    WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator)
    : ReadInterface(BlockSize, Layout, MsfData, Allocator),
      WriteInterface(MsfData) {}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createStream(uint32_t BlockSize,
                                        const MSFStreamLayout &Layout,
                                        WritableBinaryStreamRef MsfData,
                                        BumpPtrAllocator &Allocator) {
  return llvm::make_unique<MappedBlockStreamImpl<WritableMappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createIndexedStream(const MSFLayout &Layout,
                                               WritableBinaryStreamRef MsfData,
                                               uint32_t StreamIndex,
                                               BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createDirectoryStream(
    const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                           WritableBinaryStreamRef MsfData,
                                           BumpPtrAllocator &Allocator,
                                           bool AltFpm) {
  // We only want to give the user a stream containing the bytes of the FPM that
  // are actually valid, but we want to initialize all of the bytes, even those
  // that come from reserved FPM blocks where the entire block is unused.  To do
  // this, we first create the full layout, which gives us a stream with all
  // bytes and all blocks, and initialize everything to 0xFF (all blocks in the
  // file are unused).  Then we create the minimal layout (which contains only a
  // subset of the bytes previously initialized), and return that to the user.
  MSFStreamLayout MinLayout(getFpmStreamLayout(Layout, false, AltFpm));

  MSFStreamLayout FullLayout(getFpmStreamLayout(Layout, true, AltFpm));
  auto Result =
      createStream(Layout.SB->BlockSize, FullLayout, MsfData, Allocator);
  if (!Result)
    return Result;
  std::vector<uint8_t> InitData(Layout.SB->BlockSize, 0xFF);
  BinaryStreamWriter Initializer(*Result);
  while (Initializer.bytesRemaining() > 0)
    cantFail(Initializer.writeBytes(InitData));
  return createStream(Layout.SB->BlockSize, MinLayout, MsfData, Allocator);
}

Error WritableMappedBlockStream::readBytes(uint32_t Offset, uint32_t Size,
                                           ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readBytes(Offset, Size, Buffer);
}

Error WritableMappedBlockStream::readLongestContiguousChunk(
    uint32_t Offset, ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readLongestContiguousChunk(Offset, Buffer);
}

uint32_t WritableMappedBlockStream::getLength() {
  return ReadInterface.getLength();
}
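
// writeBytes() walks the request block by block, translating each chunk's
// stream offset into an absolute MSF file offset before handing it to the
// write interface, and finally patches any cached read buffers that overlap
// the written range so outstanding readers observe the new bytes.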
Error WritableMappedBlockStream::writeBytes(uint32_t Offset,
                                            ArrayRef<uint8_t> Buffer) {
  // Make sure we aren't trying to write beyond the end of the stream.
  if (auto EC = checkOffsetForWrite(Offset, Buffer.size()))
    return EC;

  uint32_t BlockNum = Offset / getBlockSize();
  uint32_t OffsetInBlock = Offset % getBlockSize();

  uint32_t BytesLeft = Buffer.size();
  uint32_t BytesWritten = 0;
  while (BytesLeft > 0) {
    uint32_t StreamBlockAddr = getStreamLayout().Blocks[BlockNum];
    uint32_t BytesToWriteInChunk =
        std::min(BytesLeft, getBlockSize() - OffsetInBlock);

    const uint8_t *Chunk = Buffer.data() + BytesWritten;
    ArrayRef<uint8_t> ChunkData(Chunk, BytesToWriteInChunk);
    uint32_t MsfOffset = blockToOffset(StreamBlockAddr, getBlockSize());
    MsfOffset += OffsetInBlock;
    if (auto EC = WriteInterface.writeBytes(MsfOffset, ChunkData))
      return EC;

    BytesLeft -= BytesToWriteInChunk;
    BytesWritten += BytesToWriteInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  ReadInterface.fixCacheAfterWrite(Offset, Buffer);

  return Error::success();
}

Error WritableMappedBlockStream::commit() { return WriteInterface.commit(); }