//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>

using namespace lldb;
using namespace lldb_private;
// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() = default;

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range.
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space.
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;
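
    // Illustrative example (not part of the original source): with a 512-byte
    // cache line, Flush(0x1001, 0x400) yields first_cache_line_addr = 0x1000,
    // end_addr = 0x1400, last_cache_line_addr = 0x1400, and num_cache_lines =
    // 3, so the loop below evicts the lines at 0x1000, 0x1200, and 0x1400.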

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

lldb::DataBufferSP MemoryCache::GetL2CacheLine(lldb::addr_t line_base_addr,
                                               Status &error) {
  // This function assumes that the address given is aligned correctly.
  assert((line_base_addr % m_L2_cache_line_byte_size) == 0);

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  auto pos = m_L2_cache.find(line_base_addr);
  if (pos != m_L2_cache.end())
    return pos->second;

  auto data_buffer_heap_sp =
      std::make_shared<DataBufferHeap>(m_L2_cache_line_byte_size, 0);
  size_t process_bytes_read = m_process.ReadMemoryFromInferior(
      line_base_addr, data_buffer_heap_sp->GetBytes(),
      data_buffer_heap_sp->GetByteSize(), error);

  // If we failed a read, not much we can do.
  if (process_bytes_read == 0)
    return lldb::DataBufferSP();

  // If we didn't get a complete read, we can still cache what we did get.
  if (process_bytes_read < m_L2_cache_line_byte_size)
    data_buffer_heap_sp->SetByteSize(process_bytes_read);

  m_L2_cache[line_base_addr] = data_buffer_heap_sp;
  return data_buffer_heap_sp;
}
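
// Illustrative note (not part of the original source): a partially read line
// is cached with its truncated byte size, so MemoryCache::Read below treats
// bytes past the valid region as unavailable rather than returning stale data.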

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  if (!dst || dst_len == 0)
    return 0;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  // FIXME: We should do a more thorough check to make sure that we're not
  // overlapping with any invalid ranges (e.g. Read 0x100 - 0x200 but there's
  // an invalid range 0x180 - 0x280). `FindEntryThatContains` has an
  // implementation that takes a range, but it only checks to see if the
  // argument is contained by an existing invalid range. It cannot check if
  // the argument contains invalid ranges and cannot check for overlaps.
  if (m_invalid_ranges.FindEntryThatContains(addr)) {
    error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64, addr);
    return 0;
  }

  // Check the L1 cache for a range that contains the entire memory read. The
  // L1 cache contains chunks of memory that are not required to be the size
  // of an L2 cache line. We avoid trying to do partial reads from the L1
  // cache to simplify the implementation.
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }
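
  // Illustrative note (not part of the original source): upper_bound returns
  // the first L1 chunk whose base address is strictly greater than addr, so
  // stepping back one entry lands on the chunk that could contain addr. For
  // chunks based at 0x1000 and 0x2000, a read at 0x1008 is served from the
  // 0x1000 chunk, provided the requested range fits entirely inside it.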

  // If the size of the read is greater than the size of an L2 cache line,
  // we'll just read from the inferior. If that read is successful, we'll
  // cache what we read in the L1 cache for future use.
  if (dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  // If the size of the read fits inside one L2 cache line, we'll try reading
  // from the L2 cache. Note that if the range of memory we're reading sits
  // between two contiguous cache lines, we'll touch two cache lines instead
  // of just one.

  // We're going to have all of our loads and reads be cache line aligned.
  addr_t cache_line_offset = addr % m_L2_cache_line_byte_size;
  addr_t cache_line_base_addr = addr - cache_line_offset;
  DataBufferSP first_cache_line = GetL2CacheLine(cache_line_base_addr, error);
  // If we get nothing, then the read to the inferior likely failed. Nothing
  // to cache.
  if (!first_cache_line)
    return 0;

  // If the cache line was not filled out completely and the offset is greater
  // than what we have available, we can't do anything further here.
  if (cache_line_offset >= first_cache_line->GetByteSize())
    return 0;

  uint8_t *dst_buf = (uint8_t *)dst;
  size_t bytes_left = dst_len;
  size_t read_size = first_cache_line->GetByteSize() - cache_line_offset;
  if (read_size > bytes_left)
    read_size = bytes_left;

  memcpy(dst_buf + dst_len - bytes_left,
         first_cache_line->GetBytes() + cache_line_offset, read_size);
  bytes_left -= read_size;

  // If the cache line was not filled out completely and we still have data to
  // read, we can't do anything further.
  if (first_cache_line->GetByteSize() < m_L2_cache_line_byte_size &&
      bytes_left > 0)
    return dst_len - bytes_left;

  // We'll hit this scenario if our read straddles two cache lines.
  if (bytes_left > 0) {
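    // Illustrative example (not part of the original source): with a 512-byte
    // cache line, a 32-byte read at addr = 0x11f0 copies the 16 bytes at
    // offset 0x1f0 from the line at 0x1000 above, then reads the remaining 16
    // bytes here from the line at 0x1200.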
    cache_line_base_addr += m_L2_cache_line_byte_size;

    // FIXME: Until we are able to more thoroughly check for invalid ranges,
    // we will have to check the second line to see if it is in an invalid
    // range as well. See the check near the beginning of the function for
    // more details.
    if (m_invalid_ranges.FindEntryThatContains(cache_line_base_addr)) {
      error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                     cache_line_base_addr);
      return dst_len - bytes_left;
    }

    DataBufferSP second_cache_line =
        GetL2CacheLine(cache_line_base_addr, error);
    if (!second_cache_line)
      return dst_len - bytes_left;

    read_size = bytes_left;
    if (read_size > second_cache_line->GetByteSize())
      read_size = second_cache_line->GetByteSize();

    memcpy(dst_buf + dst_len - bytes_left, second_cache_line->GetBytes(),
           read_size);
    bytes_left -= read_size;
  }

  return dst_len - bytes_left;
}

AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() = default;

lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log = GetLog(LLDBLog::Process);

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out
      // how many chunks we will need and calculate the resulting block size
      // we will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
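
      // Illustrative example (not part of the original source, and assuming
      // CalculateChunksNeededForSize rounds up to whole chunks): with
      // m_chunk_size = 16, a request for size = 20 needs 2 chunks, so
      // block_size = 32 and any remainder stays in the free block.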
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks
        // in the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() = default;

void AllocatedMemoryCache::Clear(bool deallocate_memory) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive() && deallocate_memory) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;
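
  // Illustrative example (not part of the original source): byte_size =
  // 0x1400 (5120) rounds up to num_pages = 2, so page_byte_size = 0x2000
  // (8192 bytes) is requested from the process.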

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log = GetLog(LLDBLog::Process);
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}
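
// Illustrative note (not part of the original source): m_memory_map is keyed
// by permission bits, so AllocateMemory only probes blocks whose permissions
// match the request exactly and falls back to allocating a fresh page when
// none of them has room.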

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}