//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>

using namespace lldb;
using namespace lldb_private;
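
// Design note (inferred from the code below): MemoryCache keeps two tiers.
// The L1 cache holds arbitrarily sized chunks that callers add explicitly via
// AddL1CacheData, keyed by base address; the L2 cache holds fixed-size,
// line-aligned blocks that are filled on demand from the inferior in
// GetL2CacheLine.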

// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() = default;

void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}
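
// Note: chunks in the L1 cache keep whatever size the caller handed in;
// unlike L2 lines they are not required to be cache-line sized or aligned
// (see the comment in MemoryCache::Read below).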

void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
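    // (For example, with 512-byte cache lines, flushing 0x400 bytes at
    // 0xfffffffffffffe00 wraps end_addr around to 0x1ff, making
    // last_cache_line_addr smaller than first_cache_line_addr.)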
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}

void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}
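
// Note: as MemoryCache::Read below shows, a read that starts inside one of
// these invalid ranges fails immediately instead of being forwarded to the
// inferior.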

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}

lldb::DataBufferSP MemoryCache::GetL2CacheLine(lldb::addr_t line_base_addr,
                                               Status &error) {
  // This function assumes that the address given is aligned correctly.
  assert((line_base_addr % m_L2_cache_line_byte_size) == 0);

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  auto pos = m_L2_cache.find(line_base_addr);
  if (pos != m_L2_cache.end())
    return pos->second;

  auto data_buffer_heap_sp =
      std::make_shared<DataBufferHeap>(m_L2_cache_line_byte_size, 0);
  size_t process_bytes_read = m_process.ReadMemoryFromInferior(
      line_base_addr, data_buffer_heap_sp->GetBytes(),
      data_buffer_heap_sp->GetByteSize(), error);

  // If we failed a read, not much we can do.
  if (process_bytes_read == 0)
    return lldb::DataBufferSP();

  // If we didn't get a complete read, we can still cache what we did get.
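  // (E.g. if the line size is 512 but only 256 bytes are readable before an
  // unreadable page, the cached line is truncated to those 256 bytes.)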
  if (process_bytes_read < m_L2_cache_line_byte_size)
    data_buffer_heap_sp->SetByteSize(process_bytes_read);

  m_L2_cache[line_base_addr] = data_buffer_heap_sp;
  return data_buffer_heap_sp;
}

size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  if (!dst || dst_len == 0)
    return 0;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  // FIXME: We should do a more thorough check to make sure that we're not
  // overlapping with any invalid ranges (e.g. Read 0x100 - 0x200 but there's an
  // invalid range 0x180 - 0x280). `FindEntryThatContains` has an implementation
  // that takes a range, but it only checks to see if the argument is contained
  // by an existing invalid range. It cannot check if the argument contains
  // invalid ranges and cannot check for overlaps.
  if (m_invalid_ranges.FindEntryThatContains(addr)) {
    error = Status::FromErrorStringWithFormat(
        "memory read failed for 0x%" PRIx64, addr);
    return 0;
  }

  // Check the L1 cache for a range that contains the entire memory read.
  // L1 cache contains chunks of memory that are not required to be the size of
  // an L2 cache line. We avoid trying to do partial reads from the L1 cache to
  // simplify the implementation.
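  // (A read that only partially overlaps an L1 chunk therefore falls through
  // to the L2 path below.)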
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }

  // If the size of the read is greater than the size of an L2 cache line, we'll
  // just read from the inferior. If that read is successful, we'll cache what
  // we read in the L1 cache for future use.
  if (dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  // If the size of the read fits inside one L2 cache line, we'll try reading
  // from the L2 cache. Note that if the range of memory we're reading sits
  // between two contiguous cache lines, we'll touch two cache lines instead of
  // just one.
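  // (E.g. with 512-byte cache lines, a 16-byte read at address 0x1f8 takes 8
  // bytes from the line at 0x0 and 8 bytes from the line at 0x200.)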

  // We're going to have all of our loads and reads be cache line aligned.
  addr_t cache_line_offset = addr % m_L2_cache_line_byte_size;
  addr_t cache_line_base_addr = addr - cache_line_offset;
  DataBufferSP first_cache_line = GetL2CacheLine(cache_line_base_addr, error);
  // If we get nothing, then the read to the inferior likely failed. Nothing to
  // cache.
  if (!first_cache_line)
    return 0;

  // If the cache line was not filled out completely and the offset is greater
  // than what we have available, we can't do anything further here.
  if (cache_line_offset >= first_cache_line->GetByteSize())
    return 0;

  uint8_t *dst_buf = (uint8_t *)dst;
  size_t bytes_left = dst_len;
  size_t read_size = first_cache_line->GetByteSize() - cache_line_offset;
  if (read_size > bytes_left)
    read_size = bytes_left;

  // dst_buf + dst_len - bytes_left is the current write cursor into dst.
  memcpy(dst_buf + dst_len - bytes_left,
         first_cache_line->GetBytes() + cache_line_offset, read_size);
  bytes_left -= read_size;

  // If the cache line was not filled out completely and we still have data to
  // read, we can't do anything further.
  if (first_cache_line->GetByteSize() < m_L2_cache_line_byte_size &&
      bytes_left > 0)
    return dst_len - bytes_left;

  // We'll hit this scenario if our read straddles two cache lines.
  if (bytes_left > 0) {
    cache_line_base_addr += m_L2_cache_line_byte_size;

    // FIXME: Until we are able to more thoroughly check for invalid ranges, we
    // will have to check the second line to see if it is in an invalid range as
    // well. See the check near the beginning of the function for more details.
    if (m_invalid_ranges.FindEntryThatContains(cache_line_base_addr)) {
      error = Status::FromErrorStringWithFormat(
          "memory read failed for 0x%" PRIx64, cache_line_base_addr);
      return dst_len - bytes_left;
    }

    DataBufferSP second_cache_line =
        GetL2CacheLine(cache_line_base_addr, error);
    if (!second_cache_line)
      return dst_len - bytes_left;

    read_size = bytes_left;
    if (read_size > second_cache_line->GetByteSize())
      read_size = second_cache_line->GetByteSize();

    memcpy(dst_buf + dst_len - bytes_left, second_cache_line->GetBytes(),
           read_size);
    bytes_left -= read_size;
  }

  return dst_len - bytes_left;
}

AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() = default;

lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log = GetLog(LLDBLog::Process);

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out how
      // many chunks we will need and calculate the resulting block size we
      // will reserve.
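      // (Requests are rounded up to whole chunks; e.g. with a 16-byte chunk
      // size, a 20-byte request reserves two chunks, i.e. 32 bytes.)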
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks in
        // the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}

bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
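    // Passing true lets the freed range coalesce with adjacent free blocks
    // (cf. the "don't combine" comment in ReserveBlock above).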
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}

AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() = default;

void AllocatedMemoryCache::Clear(bool deallocate_memory) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive() && deallocate_memory) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}

AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;
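  // (Allocations are rounded up to whole 4 KiB pages; e.g. a request of
  // 0x1001 bytes maps two pages, 0x2000 bytes.)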

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log = GetLog(LLDBLog::Process);
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}

lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);
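  // m_memory_map is keyed by permissions, so equal_range yields every block
  // already allocated with the requested permissions; try to reserve from
  // each before allocating a fresh page.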

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log = GetLog(LLDBLog::Process);
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}