//===-- Memory.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RangeMap.h"
#include "lldb/Utility/State.h"

#include <cinttypes>
#include <memory>

using namespace lldb;
using namespace lldb_private;

// MemoryCache constructor
MemoryCache::MemoryCache(Process &process)
    : m_mutex(), m_L1_cache(), m_L2_cache(), m_invalid_ranges(),
      m_process(process),
      m_L2_cache_line_byte_size(process.GetMemoryCacheLineSize()) {}

// Destructor
MemoryCache::~MemoryCache() = default;
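
// A rough sketch of how the two cache levels are used (see Read() below): the
// L1 cache holds arbitrarily sized chunks that callers hand us via
// AddL1CacheData(), while the L2 cache holds fixed-size lines of
// m_L2_cache_line_byte_size bytes that are filled on demand from the inferior.
// Reads are served whole from L1 when possible and otherwise assembled line by
// line from L2.
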
void MemoryCache::Clear(bool clear_invalid_ranges) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache.clear();
  m_L2_cache.clear();
  if (clear_invalid_ranges)
    m_invalid_ranges.Clear();
  m_L2_cache_line_byte_size = m_process.GetMemoryCacheLineSize();
}
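
// Add data to the L1 cache. The L1 cache is populated explicitly by callers
// (and by Read() below for reads larger than one L2 cache line), typically
// with data that has already been fetched from the process by other means, so
// its chunks are not required to be cache-line sized.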
void MemoryCache::AddL1CacheData(lldb::addr_t addr, const void *src,
                                 size_t src_len) {
  AddL1CacheData(addr, DataBufferSP(new DataBufferHeap(src, src_len)));
}

void MemoryCache::AddL1CacheData(lldb::addr_t addr,
                                 const DataBufferSP &data_buffer_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_L1_cache[addr] = data_buffer_sp;
}
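
// Flush any cached data that overlaps [addr, addr + size). This is called, for
// example, when memory in that range is written, so that stale copies in
// either cache level are not handed back to later reads. L1 blocks are erased
// if they intersect the range at all; in the L2 cache every cache line that
// overlaps the range is dropped.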
void MemoryCache::Flush(addr_t addr, size_t size) {
  if (size == 0)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  // Erase any blocks from the L1 cache that intersect with the flush range
  if (!m_L1_cache.empty()) {
    AddrRange flush_range(addr, size);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    while (pos != m_L1_cache.end()) {
      AddrRange chunk_range(pos->first, pos->second->GetByteSize());
      if (!chunk_range.DoesIntersect(flush_range))
        break;
      pos = m_L1_cache.erase(pos);
    }
  }

  if (!m_L2_cache.empty()) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr =
        end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
      num_cache_lines = ((last_cache_line_addr - first_cache_line_addr) /
                         cache_line_byte_size) +
                        1;
    else
      num_cache_lines =
          (UINT64_MAX - first_cache_line_addr + 1) / cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr; cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx) {
      BlockMap::iterator pos = m_L2_cache.find(curr_addr);
      if (pos != m_L2_cache.end())
        m_L2_cache.erase(pos);
    }
  }
}
void MemoryCache::AddInvalidRange(lldb::addr_t base_addr,
                                  lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    InvalidRanges::Entry range(base_addr, byte_size);
    m_invalid_ranges.Append(range);
    m_invalid_ranges.Sort();
  }
}

bool MemoryCache::RemoveInvalidRange(lldb::addr_t base_addr,
                                     lldb::addr_t byte_size) {
  if (byte_size > 0) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
    if (idx != UINT32_MAX) {
      const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex(idx);
      if (entry->GetRangeBase() == base_addr &&
          entry->GetByteSize() == byte_size)
        return m_invalid_ranges.RemoveEntryAtIndex(idx);
    }
  }
  return false;
}
size_t MemoryCache::Read(addr_t addr, void *dst, size_t dst_len,
                         Status &error) {
  size_t bytes_left = dst_len;

  // Check the L1 cache for a range that contains the entire memory read. If we
  // find a range in the L1 cache that does, we use it. Else we fall back to
  // reading memory in m_L2_cache_line_byte_size byte sized chunks. The L1
  // cache contains chunks of memory that are not required to be
  // m_L2_cache_line_byte_size bytes in size, so we don't try anything tricky
  // when reading from them (no partial reads from the L1 cache).

  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_L1_cache.empty()) {
    AddrRange read_range(addr, dst_len);
    BlockMap::iterator pos = m_L1_cache.upper_bound(addr);
    if (pos != m_L1_cache.begin()) {
      --pos;
    }
    AddrRange chunk_range(pos->first, pos->second->GetByteSize());
    if (chunk_range.Contains(read_range)) {
      memcpy(dst, pos->second->GetBytes() + (addr - chunk_range.GetRangeBase()),
             dst_len);
      return dst_len;
    }
  }

  // If this memory read request is larger than the cache line size, then we
  // (1) try to read as much of it at once as possible, and (2) don't add the
  // data to the memory cache. We don't want to split a big read up into more
  // separate reads than necessary, and with a large memory read request, it is
  // unlikely that the caller will ask for the next 4 bytes after the large
  // memory read - so there's little benefit to saving it in the cache.
  if (dst && dst_len > m_L2_cache_line_byte_size) {
    size_t bytes_read =
        m_process.ReadMemoryFromInferior(addr, dst, dst_len, error);
    // Add this non block sized range to the L1 cache if we actually read
    // anything
    if (bytes_read > 0)
      AddL1CacheData(addr, dst, bytes_read);
    return bytes_read;
  }

  if (dst && bytes_left > 0) {
    const uint32_t cache_line_byte_size = m_L2_cache_line_byte_size;
    uint8_t *dst_buf = (uint8_t *)dst;
    addr_t curr_addr = addr - (addr % cache_line_byte_size);
    addr_t cache_offset = addr - curr_addr;

    while (bytes_left > 0) {
      if (m_invalid_ranges.FindEntryThatContains(curr_addr)) {
        error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64,
                                       curr_addr);
        return dst_len - bytes_left;
      }

      BlockMap::const_iterator pos = m_L2_cache.find(curr_addr);
      BlockMap::const_iterator end = m_L2_cache.end();

      if (pos != end) {
        size_t curr_read_size = cache_line_byte_size - cache_offset;
        if (curr_read_size > bytes_left)
          curr_read_size = bytes_left;

        memcpy(dst_buf + dst_len - bytes_left,
               pos->second->GetBytes() + cache_offset, curr_read_size);

        bytes_left -= curr_read_size;
        curr_addr += curr_read_size + cache_offset;
        cache_offset = 0;

        if (bytes_left > 0) {
          // Get sequential cache page hits
          for (++pos; (pos != end) && (bytes_left > 0); ++pos) {
            assert((curr_addr % cache_line_byte_size) == 0);

            if (pos->first != curr_addr)
              break;

            curr_read_size = pos->second->GetByteSize();
            if (curr_read_size > bytes_left)
              curr_read_size = bytes_left;

            memcpy(dst_buf + dst_len - bytes_left, pos->second->GetBytes(),
                   curr_read_size);

            bytes_left -= curr_read_size;
            curr_addr += curr_read_size;

            // We have a cache page that succeeded to read some bytes but not
            // an entire page. If this happens, we must cap off how much data
            // we are able to read...
            if (pos->second->GetByteSize() != cache_line_byte_size)
              return dst_len - bytes_left;
          }
        }
      }

      // We need to read from the process

      if (bytes_left > 0) {
        assert((curr_addr % cache_line_byte_size) == 0);
        std::unique_ptr<DataBufferHeap> data_buffer_heap_up(
            new DataBufferHeap(cache_line_byte_size, 0));
        size_t process_bytes_read = m_process.ReadMemoryFromInferior(
            curr_addr, data_buffer_heap_up->GetBytes(),
            data_buffer_heap_up->GetByteSize(), error);
        if (process_bytes_read == 0)
          return dst_len - bytes_left;

        if (process_bytes_read != cache_line_byte_size) {
          if (process_bytes_read < data_buffer_heap_up->GetByteSize()) {
            dst_len -= data_buffer_heap_up->GetByteSize() - process_bytes_read;
            bytes_left = process_bytes_read;
          }
          data_buffer_heap_up->SetByteSize(process_bytes_read);
        }
        m_L2_cache[curr_addr] = DataBufferSP(data_buffer_heap_up.release());
        // We have read data and put it into the cache, continue through the
        // loop again to get the data out of the cache...
      }
    }
  }

  return dst_len - bytes_left;
}
AllocatedBlock::AllocatedBlock(lldb::addr_t addr, uint32_t byte_size,
                               uint32_t permissions, uint32_t chunk_size)
    : m_range(addr, byte_size), m_permissions(permissions),
      m_chunk_size(chunk_size) {
  // The entire address range is free to start with.
  m_free_blocks.Append(m_range);
  assert(byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock() = default;
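
// An illustrative example of the chunk math in ReserveBlock(), assuming
// CalculateChunksNeededForSize() rounds up to whole chunks (as its name
// suggests): with m_chunk_size == 16, a request for 20 bytes needs 2 chunks,
// so a 32-byte range is carved off the front of the first free block that is
// big enough.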
lldb::addr_t AllocatedBlock::ReserveBlock(uint32_t size) {
  // We must return something valid for zero bytes.
  if (size == 0)
    size = 1;
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  const size_t free_count = m_free_blocks.GetSize();
  for (size_t i = 0; i < free_count; ++i) {
    auto &free_block = m_free_blocks.GetEntryRef(i);
    const lldb::addr_t range_size = free_block.GetByteSize();
    if (range_size >= size) {
      // We found a free block that is big enough for our data. Figure out how
      // many chunks we will need and calculate the resulting block size we
      // will reserve.
      addr_t addr = free_block.GetRangeBase();
      size_t num_chunks = CalculateChunksNeededForSize(size);
      lldb::addr_t block_size = num_chunks * m_chunk_size;
      lldb::addr_t bytes_left = range_size - block_size;
      if (bytes_left == 0) {
        // The newly allocated block will take all of the bytes in this
        // available block, so we can just add it to the allocated ranges and
        // remove the range from the free ranges.
        m_reserved_blocks.Insert(free_block, false);
        m_free_blocks.RemoveEntryAtIndex(i);
      } else {
        // Make the new allocated range and add it to the allocated ranges.
        Range<lldb::addr_t, uint32_t> reserved_block(free_block);
        reserved_block.SetByteSize(block_size);
        // Insert the reserved range and don't combine it with other blocks in
        // the reserved blocks list.
        m_reserved_blocks.Insert(reserved_block, false);
        // Adjust the free range in place since we won't change the sorted
        // ordering of the m_free_blocks list.
        free_block.SetRangeBase(reserved_block.GetRangeEnd());
        free_block.SetByteSize(bytes_left);
      }
      LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size, addr);
      return addr;
    }
  }

  LLDB_LOGV(log, "({0}) (size = {1} ({1:x})) => {2:x}", this, size,
            LLDB_INVALID_ADDRESS);
  return LLDB_INVALID_ADDRESS;
}
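
// FreeBlock() returns the reserved range containing addr to the free list;
// passing true to Insert() presumably lets adjacent free ranges be merged back
// together, while reserved ranges are always inserted without combining.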
bool AllocatedBlock::FreeBlock(addr_t addr) {
  bool success = false;
  auto entry_idx = m_reserved_blocks.FindEntryIndexThatContains(addr);
  if (entry_idx != UINT32_MAX) {
    m_free_blocks.Insert(m_reserved_blocks.GetEntryRef(entry_idx), true);
    m_reserved_blocks.RemoveEntryAtIndex(entry_idx);
    success = true;
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGV(log, "({0}) (addr = {1:x}) => {2}", this, addr, success);
  return success;
}
AllocatedMemoryCache::AllocatedMemoryCache(Process &process)
    : m_process(process), m_mutex(), m_memory_map() {}

AllocatedMemoryCache::~AllocatedMemoryCache() = default;

void AllocatedMemoryCache::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_process.IsAlive()) {
    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    for (pos = m_memory_map.begin(); pos != end; ++pos)
      m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
  }
  m_memory_map.clear();
}
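
// AllocatePage() rounds the request up to whole 4096-byte pages before asking
// the process. For example, a request for 0x1234 bytes becomes two pages
// (8192 bytes), and the whole page block is then carved up using the caller's
// chunk size.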
AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage(uint32_t byte_size, uint32_t permissions,
                                   uint32_t chunk_size, Status &error) {
  AllocatedBlockSP block_sp;
  const size_t page_size = 4096;
  const size_t num_pages = (byte_size + page_size - 1) / page_size;
  const size_t page_byte_size = num_pages * page_size;

  addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log) {
    LLDB_LOGF(log,
              "Process::DoAllocateMemory (byte_size = 0x%8.8" PRIx32
              ", permissions = %s) => 0x%16.16" PRIx64,
              (uint32_t)page_byte_size, GetPermissionsAsCString(permissions),
              (uint64_t)addr);
  }

  if (addr != LLDB_INVALID_ADDRESS) {
    block_sp = std::make_shared<AllocatedBlock>(addr, page_byte_size,
                                                permissions, chunk_size);
    m_memory_map.insert(std::make_pair(permissions, block_sp));
  }
  return block_sp;
}
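
// AllocateMemory() first tries to reserve space in an existing block with the
// requested permissions and only falls back to allocating a fresh page (with a
// 16-byte chunk size) when none of them has room.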
lldb::addr_t AllocatedMemoryCache::AllocateMemory(size_t byte_size,
                                                  uint32_t permissions,
                                                  Status &error) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  addr_t addr = LLDB_INVALID_ADDRESS;
  std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator>
      range = m_memory_map.equal_range(permissions);

  for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second;
       ++pos) {
    addr = (*pos).second->ReserveBlock(byte_size);
    if (addr != LLDB_INVALID_ADDRESS)
      break;
  }

  if (addr == LLDB_INVALID_ADDRESS) {
    AllocatedBlockSP block_sp(AllocatePage(byte_size, permissions, 16, error));

    if (block_sp)
      addr = block_sp->ReserveBlock(byte_size);
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8" PRIx32
            ", permissions = %s) => 0x%16.16" PRIx64,
            (uint32_t)byte_size, GetPermissionsAsCString(permissions),
            (uint64_t)addr);
  return addr;
}

bool AllocatedMemoryCache::DeallocateMemory(lldb::addr_t addr) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
  bool success = false;
  for (pos = m_memory_map.begin(); pos != end; ++pos) {
    if (pos->second->Contains(addr)) {
      success = pos->second->FreeBlock(addr);
      break;
    }
  }
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOGF(log,
            "AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64
            ") => %i",
            (uint64_t)addr, success);
  return success;
}