//===-- xray_allocator.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the allocator interface for an arena allocator, used primarily for
// the profiling runtime.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_ALLOCATOR_H
#define XRAY_ALLOCATOR_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#if SANITIZER_FUCHSIA
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#else
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_defs.h"
#include "xray_utils.h"
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>
#include <utility> // for std::forward, used by initArray below.

namespace __xray {
// We implement our own memory allocation routine which will bypass the
// internal allocator. This allows us to manage the memory directly, using
// mmap'ed memory to back the allocators.
template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
  uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  zx_handle_t Vmo;
  zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to create VMO of size %zu: %s\n",
             sizeof(T), _zx_status_get_string(Status));
    return nullptr;
  }
  uintptr_t B;
  Status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   Vmo, 0, sizeof(T), &B);
  _zx_handle_close(Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", sizeof(T),
             _zx_status_get_string(Status));
    return nullptr;
  }
  return reinterpret_cast<T *>(B);
#else
  uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int ErrNo = 0;
  if (UNLIKELY(internal_iserror(B, &ErrNo))) {
    if (Verbosity())
      Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
             "%zu\n",
             RoundedSize, B);
    return nullptr;
  }
#endif
  return reinterpret_cast<T *>(B);
}
template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT {
  if (B == nullptr)
    return;
  uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
                 RoundedSize);
#else
  internal_munmap(B, RoundedSize);
#endif
}
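
// A minimal usage sketch (illustrative only, not part of the runtime
// interface); the type `ExampleRecord` is hypothetical:
//
//   struct ExampleRecord { int Values[4]; };
//   ExampleRecord *R = allocate<ExampleRecord>(); // nullptr on failure
//   if (R != nullptr) {
//     R->Values[0] = 42;
//     deallocate(R); // unmaps the page-rounded region backing R
//   }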
template <class T = unsigned char>
T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
  uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  zx_handle_t Vmo;
  zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to create VMO of size %zu: %s\n", S,
             _zx_status_get_string(Status));
    return nullptr;
  }
  uintptr_t B;
  Status = _zx_vmar_map(_zx_vmar_root_self(),
                        ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, Vmo, 0, S, &B);
  _zx_handle_close(Vmo);
  if (Status != ZX_OK) {
    if (Verbosity())
      Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", S,
             _zx_status_get_string(Status));
    return nullptr;
  }
#else
  uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  int ErrNo = 0;
  if (UNLIKELY(internal_iserror(B, &ErrNo))) {
    if (Verbosity())
      Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
             "%zu\n",
             RoundedSize, B);
    return nullptr;
  }
#endif
  return reinterpret_cast<T *>(B);
}
template <class T> void deallocateBuffer(T *B, size_t S) XRAY_NEVER_INSTRUMENT {
  if (B == nullptr)
    return;
  uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
#if SANITIZER_FUCHSIA
  _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
                 RoundedSize);
#else
  internal_munmap(B, RoundedSize);
#endif
}
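
// Illustrative sketch: allocateBuffer<T>(S) maps at least S * sizeof(T) bytes
// (rounded up to a page), and deallocateBuffer must be passed the same element
// count so the same rounded size is unmapped:
//
//   uint64_t *Counters = allocateBuffer<uint64_t>(1024); // nullptr on failure
//   if (Counters != nullptr)
//     deallocateBuffer(Counters, 1024);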
template <class T, class... U>
T *initArray(size_t N, U &&... Us) XRAY_NEVER_INSTRUMENT {
  auto A = allocateBuffer<T>(N);
  if (A != nullptr)
    while (N > 0)
      new (A + (--N)) T(std::forward<U>(Us)...);
  return A;
}
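
// Illustrative sketch: initArray placement-constructs N objects of T in a
// freshly mapped buffer, forwarding the same arguments to every element's
// constructor. The type `Point` here is hypothetical:
//
//   struct Point { int X, Y; Point(int A, int B) : X(A), Y(B) {} };
//   Point *Ps = initArray<Point>(8, 1, 2); // eight Point{1, 2}, or nullptr
//   if (Ps != nullptr)
//     deallocateBuffer(Ps, 8); // Point is trivially destructible; just unmap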
/// The Allocator type hands out fixed-sized chunks of memory that are
/// cache-line aligned and sized. This is useful for placement of
/// performance-sensitive data in memory that's frequently accessed. The
/// allocator also self-limits the peak memory usage to a dynamically defined
/// maximum.
///
/// N is the lower-bound size of the block of memory to return from the
/// allocation function. N is used to compute the size of a block, which is
/// cache-line-size multiples worth of memory. We compute the size of a block by
/// determining how many cache lines worth of memory is required to subsume N.
///
/// The Allocator instance will manage its own memory acquired through mmap.
/// This severely constrains the platforms on which this can be used to POSIX
/// systems where mmap semantics are well-defined.
///
/// FIXME: Isolate the lower-level memory management to a different abstraction
/// that can be platform-specific.
template <size_t N> struct Allocator {
  // The Allocator returns memory as Block instances.
  struct Block {
    /// Compute the minimum cache-line size multiple that is >= N.
    static constexpr auto Size = nearest_boundary(N, kCacheLineSize);
    void *Data;
  };
private:
  size_t MaxMemory{0};
  unsigned char *BackingStore = nullptr;
  unsigned char *AlignedNextBlock = nullptr;
  size_t AllocatedBlocks = 0;
  bool Owned;
  SpinMutex Mutex{};
  void *Alloc() XRAY_NEVER_INSTRUMENT {
    SpinMutexLock Lock(&Mutex);
    if (UNLIKELY(BackingStore == nullptr)) {
      BackingStore = allocateBuffer(MaxMemory);
      if (BackingStore == nullptr) {
        if (Verbosity())
          Report("XRay Profiling: Failed to allocate memory for allocator\n");
        return nullptr;
      }

      AlignedNextBlock = BackingStore;

      // Ensure that NextBlock is aligned appropriately.
      auto BackingStoreNum = reinterpret_cast<uintptr_t>(BackingStore);
      auto AlignedNextBlockNum = nearest_boundary(
          reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize);
      if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) {
        deallocateBuffer(BackingStore, MaxMemory);
        AlignedNextBlock = BackingStore = nullptr;
        if (Verbosity())
          Report("XRay Profiling: Cannot obtain enough memory from "
                 "preallocated region\n");
        return nullptr;
      }

      AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum);

      // Assert that AlignedNextBlock is cache-line aligned.
      DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
                0);
    }

    if (((AllocatedBlocks + 1) * Block::Size) > MaxMemory)
      return nullptr;

    // Align the pointer we'd like to return to an appropriate alignment, then
    // advance the pointer from where to start allocations.
    void *Result = AlignedNextBlock;
    AlignedNextBlock =
        reinterpret_cast<unsigned char *>(AlignedNextBlock) + Block::Size;
    ++AllocatedBlocks;
    return Result;
  }
public:
  explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT
      : MaxMemory(RoundUpTo(M, kCacheLineSize)),
        BackingStore(nullptr),
        AlignedNextBlock(nullptr),
        AllocatedBlocks(0),
        Owned(true),
        Mutex() {}

  explicit Allocator(void *P, size_t M) XRAY_NEVER_INSTRUMENT
      : MaxMemory(M),
        BackingStore(reinterpret_cast<unsigned char *>(P)),
        AlignedNextBlock(reinterpret_cast<unsigned char *>(P)),
        AllocatedBlocks(0),
        Owned(false),
        Mutex() {}

  Allocator(const Allocator &) = delete;
  Allocator &operator=(const Allocator &) = delete;
  Allocator(Allocator &&O) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock L0(&Mutex);
    SpinMutexLock L1(&O.Mutex);
    MaxMemory = O.MaxMemory;
    O.MaxMemory = 0;
    BackingStore = O.BackingStore;
    O.BackingStore = nullptr;
    AlignedNextBlock = O.AlignedNextBlock;
    O.AlignedNextBlock = nullptr;
    AllocatedBlocks = O.AllocatedBlocks;
    O.AllocatedBlocks = 0;
    Owned = O.Owned;
    O.Owned = false;
  }
  Allocator &operator=(Allocator &&O) XRAY_NEVER_INSTRUMENT {
    SpinMutexLock L0(&Mutex);
    SpinMutexLock L1(&O.Mutex);
    MaxMemory = O.MaxMemory;
    O.MaxMemory = 0;
    if (BackingStore != nullptr)
      deallocateBuffer(BackingStore, MaxMemory);
    BackingStore = O.BackingStore;
    O.BackingStore = nullptr;
    AlignedNextBlock = O.AlignedNextBlock;
    O.AlignedNextBlock = nullptr;
    AllocatedBlocks = O.AllocatedBlocks;
    O.AllocatedBlocks = 0;
    Owned = O.Owned;
    O.Owned = false;
    return *this;
  }
  Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; }

  ~Allocator() NOEXCEPT XRAY_NEVER_INSTRUMENT {
    if (Owned && BackingStore != nullptr) {
      deallocateBuffer(BackingStore, MaxMemory);
    }
  }
};
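
// Illustrative sketch of the Allocator interface: each Block is at least N
// bytes, cache-line aligned, and allocation fails (Data == nullptr) once the
// byte budget given at construction is exhausted. The budget here is
// hypothetical:
//
//   Allocator<sizeof(uint64_t)> A(1 << 20); // self-limits to ~1 MiB
//   auto B = A.Allocate();
//   if (B.Data != nullptr) {
//     // ... place a cache-line-aligned record at B.Data ...
//   }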
} // namespace __xray

#endif // XRAY_ALLOCATOR_H