1 //===-- MachVMRegion.cpp ----------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // Created by Greg Clayton on 6/26/07.
11 //===----------------------------------------------------------------------===//
13 #include "MachVMRegion.h"
16 #include <mach/mach_vm.h>
// Construct a region object bound to "task". All address/size state starts
// out invalid (INVALID_NUB_ADDRESS / 0) until GetRegionForAddress() fills it
// in; the region-info buffer is zeroed so stale kernel data can't leak out.
MachVMRegion::MachVMRegion(task_t task)
    : m_task(task), m_addr(INVALID_NUB_ADDRESS), m_err(),
      m_start(INVALID_NUB_ADDRESS), m_size(0), m_depth(-1),
      m_curr_protection(0), m_protection_addr(INVALID_NUB_ADDRESS),
      m_protection_size(0) {
  memset(&m_data, 0, sizeof(m_data));
}
MachVMRegion::~MachVMRegion() {
  // Restore any original protections and clear our vars
  Clear();
}
// Reset this object to its just-constructed state. Any protections we
// changed via SetProtections() are put back first, then every cached
// field is invalidated so a subsequent GetRegionForAddress() starts fresh.
void MachVMRegion::Clear() {
  RestoreProtections();
  m_addr = INVALID_NUB_ADDRESS;
  m_err.Clear();
  m_start = INVALID_NUB_ADDRESS;
  m_size = 0;
  m_depth = -1;
  memset(&m_data, 0, sizeof(m_data));
  m_curr_protection = 0;
  m_protection_addr = INVALID_NUB_ADDRESS;
  m_protection_size = 0;
}
// Change the VM protections for [addr, addr + size) to "prot", clipping the
// request to the bounds of this region. Returns true if the protections are
// (or already were) as requested; false on zero-size requests, addresses
// outside this region, or kernel failure. On success the original settings
// are remembered so RestoreProtections() can undo the change.
bool MachVMRegion::SetProtections(mach_vm_address_t addr, mach_vm_size_t size,
                                  vm_prot_t prot) {
  if (ContainsAddress(addr)) {
    mach_vm_size_t prot_size = size;
    mach_vm_address_t end_addr = EndAddress();

    // Clip the requested range so it doesn't extend past the region end.
    if (prot_size > (end_addr - addr))
      prot_size = end_addr - addr;

    if (prot_size > 0) {
      if (prot == (m_curr_protection & VM_PROT_ALL)) {
        DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE,
                         "MachVMRegion::%s: protections (%u) already "
                         "sufficient for task 0x%4.4x at address 0x%8.8llx) ",
                         __FUNCTION__, prot, m_task, (uint64_t)addr);
        // Protections are already set as requested...
        return true;
      } else {
        m_err = ::mach_vm_protect(m_task, addr, prot_size, 0, prot);
        if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS))
          m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = "
                            "0x%8.8llx, size = %llu, set_max = %i, prot = %u )",
                            m_task, (uint64_t)addr, (uint64_t)prot_size, 0,
                            prot);
        if (m_err.Fail()) {
          // Try again with the ability to create a copy on write region
          m_err = ::mach_vm_protect(m_task, addr, prot_size, 0,
                                    prot | VM_PROT_COPY);
          if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = "
                              "0x%8.8llx, size = %llu, set_max = %i, prot = %u "
                              ")",
                              m_task, (uint64_t)addr, (uint64_t)prot_size, 0,
                              prot | VM_PROT_COPY);
        }
        if (m_err.Success()) {
          // Remember what we changed so RestoreProtections() can undo it.
          m_curr_protection = prot;
          m_protection_addr = addr;
          m_protection_size = prot_size;
          return true;
        }
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE,
                       "%s: Zero size for task 0x%4.4x at address 0x%8.8llx) ",
                       __FUNCTION__, m_task, (uint64_t)addr);
    }
  }
  return false;
}
// Undo a previous SetProtections() call by re-applying the protections the
// kernel originally reported for this region (m_data.protection). Returns
// true if nothing needed restoring or the restore succeeded; false if the
// mach_vm_protect call failed.
bool MachVMRegion::RestoreProtections() {
  if (m_curr_protection != m_data.protection && m_protection_size > 0) {
    m_err = ::mach_vm_protect(m_task, m_protection_addr, m_protection_size, 0,
                              m_data.protection);
    if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, set_max = %i, prot = %u )",
                        m_task, (uint64_t)m_protection_addr,
                        (uint64_t)m_protection_size, 0, m_data.protection);
    if (m_err.Success()) {
      // Forget the override now that the original protections are back.
      m_protection_size = 0;
      m_protection_addr = INVALID_NUB_ADDRESS;
      m_curr_protection = m_data.protection;
      return true;
    }
  } else {
    // Nothing was modified; report success.
    m_err.Clear();
    return true;
  }

  return false;
}
// Query the kernel (mach_vm_region_recurse) for the VM region containing
// "addr". Returns true only if "addr" lies inside the region that was
// returned; returns false either on kernel error or when the kernel handed
// back the *next* region (in which case m_err is still a success and the
// region data in this object is valid — callers can use it to find where
// the next region begins).
bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) {
  // Restore any original protections and clear our vars
  Clear();
  m_err.Clear();
  m_addr = addr;
  m_start = addr;
  // Recurse through all submaps so we get the deepest (most specific) entry.
  m_depth = 1024;
  mach_msg_type_number_t info_size = kRegionInfoSize;
  static_assert(sizeof(info_size) == 4, "");
  m_err =
      ::mach_vm_region_recurse(m_task, &m_start, &m_size, &m_depth,
                               (vm_region_recurse_info_t)&m_data, &info_size);

  const bool failed = m_err.Fail();
  const bool log_protections = DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS);

  if (log_protections || failed)
    m_err.LogThreaded("::mach_vm_region_recurse ( task = 0x%4.4x, address => "
                      "0x%8.8llx, size => %llu, nesting_depth => %d, info => "
                      "%p, infoCnt => %d) addr = 0x%8.8llx ",
                      m_task, (uint64_t)m_start, (uint64_t)m_size, m_depth,
                      &m_data, info_size, (uint64_t)addr);

  if (failed)
    return false;

  if (log_protections) {
    DNBLogThreaded("info = { prot = %u, "
                   "max_prot = %u, "
                   "inheritance = 0x%8.8x, "
                   "offset = 0x%8.8llx, "
                   "user_tag = 0x%8.8x, "
                   "ref_count = %u, "
                   "shadow_depth = %u, "
                   "ext_pager = %u, "
                   "share_mode = %u, "
                   "is_submap = %d, "
                   "behavior = %d, "
                   "object_id = 0x%8.8x, "
                   "user_wired_count = 0x%4.4x }",
                   m_data.protection, m_data.max_protection, m_data.inheritance,
                   (uint64_t)m_data.offset, m_data.user_tag, m_data.ref_count,
                   m_data.shadow_depth, m_data.external_pager,
                   m_data.share_mode, m_data.is_submap, m_data.behavior,
                   m_data.object_id, m_data.user_wired_count);
  }
  m_curr_protection = m_data.protection;

  // We make a request for an address and got no error back, but this
  // doesn't mean that "addr" is in the range. The data in this object will
  // be valid though, so you could see where the next region begins. So we
  // return false, yet leave "m_err" with a successful return code.
  return !((addr < m_start) || (addr >= (m_start + m_size)));
}
// Translate the Mach VM_PROT_* bits of the current region into the debugger's
// eMemoryPermissions* bitmask. Returns 0 when this object holds no valid
// region (never filled in, or zero-sized).
uint32_t MachVMRegion::GetDNBPermissions() const {
  if (m_addr == INVALID_NUB_ADDRESS || m_start == INVALID_NUB_ADDRESS ||
      m_size == 0)
    return 0;
  uint32_t dnb_permissions = 0;

  if ((m_data.protection & VM_PROT_READ) == VM_PROT_READ)
    dnb_permissions |= eMemoryPermissionsReadable;
  if ((m_data.protection & VM_PROT_WRITE) == VM_PROT_WRITE)
    dnb_permissions |= eMemoryPermissionsWritable;
  if ((m_data.protection & VM_PROT_EXECUTE) == VM_PROT_EXECUTE)
    dnb_permissions |= eMemoryPermissionsExecutable;
  return dnb_permissions;
}
186 std::vector
<std::string
> MachVMRegion::GetMemoryTypes() const {
187 std::vector
<std::string
> types
;
188 if (m_data
.user_tag
== VM_MEMORY_STACK
) {
189 if (m_data
.protection
== VM_PROT_NONE
) {
190 types
.push_back("stack-guard");
192 types
.push_back("stack");
195 if (m_data
.user_tag
== VM_MEMORY_MALLOC
) {
196 if (m_data
.protection
== VM_PROT_NONE
)
197 types
.push_back("malloc-guard");
198 else if (m_data
.share_mode
== SM_EMPTY
)
199 types
.push_back("malloc-reserved");
201 types
.push_back("malloc-metadata");
203 if (m_data
.user_tag
== VM_MEMORY_MALLOC_NANO
||
204 m_data
.user_tag
== VM_MEMORY_MALLOC_TINY
||
205 m_data
.user_tag
== VM_MEMORY_MALLOC_SMALL
||
206 m_data
.user_tag
== VM_MEMORY_MALLOC_LARGE
||
207 m_data
.user_tag
== VM_MEMORY_MALLOC_LARGE_REUSED
||
208 m_data
.user_tag
== VM_MEMORY_MALLOC_LARGE_REUSABLE
||
209 m_data
.user_tag
== VM_MEMORY_MALLOC_HUGE
||
210 m_data
.user_tag
== VM_MEMORY_REALLOC
||
211 m_data
.user_tag
== VM_MEMORY_SBRK
) {
212 types
.push_back("heap");
213 if (m_data
.user_tag
== VM_MEMORY_MALLOC_TINY
) {
214 types
.push_back("malloc-tiny");
216 if (m_data
.user_tag
== VM_MEMORY_MALLOC_LARGE
) {
217 types
.push_back("malloc-large");
219 if (m_data
.user_tag
== VM_MEMORY_MALLOC_SMALL
) {
220 types
.push_back("malloc-small");