//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
#include <algorithm>
#include <cstring>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <vector>

#if defined(WITH_FBS) || defined(WITH_BKS)
extern "C" {
#import <System/sys/kern_memorystatus.h>
}
#endif

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {}

MachVMMemory::~MachVMMemory() = default;
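
// Return the page size of the target task, caching the result. Prefer the
// task's own page size from TASK_VM_INFO (the debuggee's page size may differ
// from debugserver's), and fall back to the host page size if that query
// fails.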
nub_size_t MachVMMemory::PageSize(task_t task) {
  if (m_page_size == kInvalidPageSize) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22
    if (task != TASK_NULL) {
      kern_return_t kr;
      mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
      task_vm_info_data_t vm_info;
      kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
      if (kr == KERN_SUCCESS) {
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
        m_page_size = vm_info.page_size;
        return m_page_size;
      } else {
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
      }
    }
#endif
    m_err = ::host_page_size(::mach_host_self(), &m_page_size);
    if (m_err.Fail())
      m_page_size = 0;
  }
  return m_page_size;
}
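
// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary in the target task.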
nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr,
                                            nub_size_t count) {
  const nub_size_t page_size = PageSize(task);
  if (page_size > 0) {
    nub_size_t page_offset = (addr % page_size);
    nub_size_t bytes_left_in_page = page_size - page_offset;
    if (count > bytes_left_in_page)
      count = bytes_left_in_page;
  }
  return count;
}

#define MAX_STACK_ALLOC_DISPOSITIONS                                           \
  (16 * 1024 / sizeof(int)) // 16K of allocations
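
// Ask the kernel which pages in [addr, addr + size) are dirty, querying the
// page dispositions in fixed-size chunks so the on-stack buffer stays small.
// Returns the start address of every dirty page found.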
std::vector<nub_addr_t> get_dirty_pages(task_t task, mach_vm_address_t addr,
                                        mach_vm_size_t size) {
  std::vector<nub_addr_t> dirty_pages;

  int pages_to_query = size / vm_page_size;
  // Don't try to fetch too many pages' dispositions in a single call or we
  // could blow our stack out.
  mach_vm_size_t dispositions_size =
      std::min(pages_to_query, (int)MAX_STACK_ALLOC_DISPOSITIONS);
  int dispositions[dispositions_size];

  mach_vm_size_t chunk_count =
      ((pages_to_query + MAX_STACK_ALLOC_DISPOSITIONS - 1) /
       MAX_STACK_ALLOC_DISPOSITIONS);

  for (mach_vm_size_t cur_disposition_chunk = 0;
       cur_disposition_chunk < chunk_count; cur_disposition_chunk++) {
    mach_vm_size_t dispositions_already_queried =
        cur_disposition_chunk * MAX_STACK_ALLOC_DISPOSITIONS;

    mach_vm_size_t chunk_pages_to_query = std::min(
        pages_to_query - dispositions_already_queried, dispositions_size);
    mach_vm_address_t chunk_page_aligned_start_addr =
        addr + (dispositions_already_queried * vm_page_size);

    kern_return_t kr = mach_vm_page_range_query(
        task, chunk_page_aligned_start_addr,
        chunk_pages_to_query * vm_page_size, (mach_vm_address_t)dispositions,
        &chunk_pages_to_query);
    if (kr != KERN_SUCCESS)
      return dirty_pages;
    for (mach_vm_size_t i = 0; i < chunk_pages_to_query; i++) {
      uint64_t dirty_addr = chunk_page_aligned_start_addr + (i * vm_page_size);
      if (dispositions[i] & VM_PAGE_QUERY_PAGE_DIRTY)
        dirty_pages.push_back(dirty_addr);
    }
  }
  return dirty_pages;
}
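
// Fill in "region_info" for the region containing "address". If the address
// is not in any mapping, report an inaccessible region whose size extends to
// the next mapping, or to the end of the address space if there is none.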
nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
                                             DNBRegionInfo *region_info) {
  MachVMRegion vmRegion(task);

  if (vmRegion.GetRegionForAddress(address)) {
    region_info->addr = vmRegion.StartAddress();
    region_info->size = vmRegion.GetByteSize();
    region_info->permissions = vmRegion.GetDNBPermissions();
    region_info->dirty_pages =
        get_dirty_pages(task, vmRegion.StartAddress(), vmRegion.GetByteSize());
    region_info->vm_types = vmRegion.GetMemoryTypes();
  } else {
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success()) {
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task, which means the info describes the
      // next valid region from which we can infer the size of this invalid
      // region.
      mach_vm_address_t start_addr = vmRegion.StartAddress();
      if (address < start_addr)
        region_info->size = start_addr - address;
    }
    // If we can't get any info about the size from the next region it means
    // we asked about an address that was past all mappings, so the size
    // of this region will take up all remaining address space.
    if (region_info->size == 0)
      region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Not readable, writeable or executable
    region_info->permissions = 0;
  }
  return true;
}
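
// Return the amount of physical memory on the host, computed once via the
// "hw.memsize" sysctl and cached for subsequent calls.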
static uint64_t GetPhysicalMemory() {
  // This doesn't change often at all. No need to poll each time.
  static uint64_t physical_memory = 0;
  static bool calculated = false;
  if (calculated)
    return physical_memory;

  size_t len = sizeof(physical_memory);
  sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);
  calculated = true;
  return physical_memory;
}
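
// Gather memory statistics for a profiling scan: host physical memory,
// host-wide VM statistics, the task's anonymous memory and physical
// footprint, and (when memorystatus is available) the process memory limit.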
nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, uint64_t &anonymous,
    uint64_t &phys_footprint, uint64_t &memory_cap)
{
  if (scanType & eProfileHostMemory)
    physical_memory = GetPhysicalMemory();

  if (scanType & eProfileMemory) {
    static mach_port_t localHost = mach_host_self();
    mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
    host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo,
                      &count);

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                   &info_count);
    if (kr == KERN_SUCCESS) {
      if (scanType & eProfileMemoryAnonymous) {
        anonymous = vm_info.internal + vm_info.compressed -
                    vm_info.purgeable_volatile_pmap;
      }

      phys_footprint = vm_info.phys_footprint;
    }
  }

#if defined(WITH_FBS) || defined(WITH_BKS)
  if (scanType & eProfileMemoryCap) {
    memorystatus_memlimit_properties_t memlimit_properties;
    memset(&memlimit_properties, 0, sizeof(memlimit_properties));
    if (memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES, pid, 0,
                             &memlimit_properties,
                             sizeof(memlimit_properties)) == 0) {
      memory_cap = memlimit_properties.memlimit_active;
    }
  }
#endif

  return true;
}
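
// Read up to "data_count" bytes from the task at "address" into "data",
// page by page using mach_vm_read(). Returns the number of bytes actually
// read; stops at the first failed read.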
nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data,
                              nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_read = 0;
  nub_addr_t curr_addr = address;
  uint8_t *curr_data = (uint8_t *)data;
  while (total_bytes_read < data_count) {
    mach_vm_size_t curr_size =
        MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
    mach_msg_type_number_t curr_bytes_read = 0;
    vm_offset_t vm_memory = 0;
    m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory,
                           &curr_bytes_read);

    if (DNBLogCheckLogBit(LOG_MEMORY))
      m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, data => %8.8p, dataCnt => %i )",
                        task, (uint64_t)curr_addr, (uint64_t)curr_size,
                        vm_memory, curr_bytes_read);

    if (m_err.Success()) {
      if (curr_bytes_read != curr_size) {
        if (DNBLogCheckLogBit(LOG_MEMORY))
          DNBLogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, "
              "data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
      }
      ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read);
      ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
      total_bytes_read += curr_bytes_read;
      curr_addr += curr_bytes_read;
      curr_data += curr_bytes_read;
    } else {
      break;
    }
  }
  return total_bytes_read;
}
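
// Write "data_count" bytes to the task at "address". The write proceeds
// region by region; each region's protections are set to read/write via
// SetProtections() before WriteRegion() copies the bytes in.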
nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address,
                               const void *data, nub_size_t data_count) {
  MachVMRegion vmRegion(task);

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;

  while (total_bytes_written < data_count) {
    if (vmRegion.GetRegionForAddress(curr_addr)) {
      mach_vm_size_t curr_data_count = data_count - total_bytes_written;
      mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
      if (region_bytes_left == 0) {
        break;
      }
      if (curr_data_count > region_bytes_left)
        curr_data_count = region_bytes_left;

      if (vmRegion.SetProtections(curr_addr, curr_data_count,
                                  VM_PROT_READ | VM_PROT_WRITE)) {
        nub_size_t bytes_written =
            WriteRegion(task, curr_addr, curr_data, curr_data_count);
        if (bytes_written <= 0) {
          // Status should have already been posted by WriteRegion...
          break;
        } else {
          total_bytes_written += bytes_written;
          curr_addr += bytes_written;
          curr_data += bytes_written;
        }
      } else {
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
        break;
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
      break;
    }
  }

  return total_bytes_written;
}
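
// Write to a single already-writable region, one page-sized chunk at a time
// with mach_vm_write(). On non-x86 targets the caches are flushed for the
// written range via vm_machine_attribute().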
nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address,
                                     const void *data,
                                     const nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;
  while (total_bytes_written < data_count) {
    mach_msg_type_number_t curr_data_count =
        static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(
            task, curr_addr, data_count - total_bytes_written));
    m_err = ::mach_vm_write(task, curr_addr, (pointer_t)curr_data,
                            curr_data_count);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "data = %8.8p, dataCnt = %u )",
                        task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined(__i386__) && !defined(__x86_64__)
    vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

    m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count,
                                   MATTR_CACHE, &mattr_value);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = "
                        "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value "
                        "=> MATTR_VAL_CACHE_FLUSH )",
                        task, (uint64_t)curr_addr, curr_data_count);
#endif

    if (m_err.Success()) {
      total_bytes_written += curr_data_count;
      curr_addr += curr_data_count;
      curr_data += curr_data_count;
    } else {
      break;
    }
  }
  return total_bytes_written;
}