//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
// allocated chunks. To be used in memory-constrained or not particularly
// memory-hungry configurations (currently, 32-bit targets and the internal
// allocator).
class LargeMmapAllocatorPtrArrayStatic {
 public:
  inline void *Init() { return &p_[0]; }
  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }

 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};
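
// For scale (simple arithmetic, not a configuration knob): the table above is
// kMaxNumChunks * sizeof(uptr) = 32768 * 4 or 8 bytes, i.e. roughly 128 KiB on
// 32-bit and 256 KiB on 64-bit targets, all of it allocated statically.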

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to get
// the same functionality on Fuchsia, which does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  inline void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    return reinterpret_cast<void *>(p);
  }

  inline void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};
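
// For scale (derived from the constants above, assuming a 64-bit uptr): Init()
// reserves kMaxNumChunks * sizeof(uptr) = (1 << 20) * 8 bytes = 8 MiB of
// address space, and EnsureSpace() commits it in increments of
// kChunksBlockCount * sizeof(uptr) = (1 << 14) * 8 bytes = 128 KiB, so
// physical memory is only consumed as the number of chunks actually grows.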

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
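//
// Rough usage sketch (the callback name here is illustrative, not part of this
// header): a tool supplies an OnMap/OnUnmap callback and instantiates, e.g.,
//   typedef LargeMmapAllocator<ToolMapUnmapCallback> SecondaryAllocator;
// CombinedAllocator then falls back to this secondary allocator for requests
// the primary size-class allocator cannot serve.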
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
          class AddressSpaceViewTy = LocalAddressSpaceView>
class LargeMmapAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header **>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      h->chunk_idx = idx;
      chunks_[idx] = h;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void *>(res);
  }
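
  // Worked example of the layout produced above (assuming a 4 KiB page size
  // and alignment <= page_size_): Allocate(stat, 10000, 8) computes
  // map_size = RoundUpMapSize(10000) = 12288 + 4096 = 16384 bytes, stores the
  // Header in the first page of the mapping, and returns
  // res = map_beg + 4096, so GetHeader(res) finds the Header exactly one page
  // below the user pointer.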

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void *>(h->map_beg), h->map_size);
  }
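
  // Note on the removal scheme above: the freed chunk's slot is refilled with
  // the last element of chunks_ (an O(1) unordered erase). For example, with
  // chunks_ = {A, B, C, D} and B being freed, D moves into index 1 and its
  // chunk_idx is updated, leaving {A, D, C}; this is also why chunks_sorted_
  // has to be reset.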

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) const {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }
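
  // The metadata returned above is the tail of the header page: the page in
  // front of the user chunk holds [ Header | metadata ... ], and since the
  // Header is only a few machine words, well over page_size_/2 bytes of that
  // page remain available for tool metadata.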

  void *GetBlockBegin(const void *ptr) const {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    const Header *h =
        AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
    Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h_ptr);
  }
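
  // Note on the linear search above: nearest_chunk starts at 0, so for the
  // first chunk with ch <= p the unsigned comparison p - ch < p - 0 holds and
  // that chunk is taken; afterwards only chunks closer to p (but not above it)
  // replace it. The final check rejects pointers that fall past the end of the
  // chosen chunk's mapping.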

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
    chunks_sorted_ = true;
  }
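
  // Sorting the Header pointers as raw uptr values orders the chunks by
  // address, which is what the binary search below relies on; chunk_idx is
  // rewritten afterwards so that Deallocate()'s O(1) index lookup stays valid.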

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }
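
  // Worked example of the lower_bound loop above: with sorted chunk addresses
  // {0x1000, 0x5000, 0x9000, 0xd000} and p = 0x6000, beg/end start at 0/3;
  // mid = 1 keeps beg = 1 (chunks[1] <= p), mid = 2 sets end = 1, the loop
  // stops with beg == end == 1, and chunks[1] is then range-checked against
  // its map_size.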
256 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
257 "remains %zd (%zd K) max %zd M; by size logs: ",
258 stats
.n_allocs
, stats
.n_allocs
- stats
.n_frees
,
259 stats
.currently_allocated
>> 10, stats
.max_allocated
>> 20);
260 for (uptr i
= 0; i
< ARRAY_SIZE(stats
.by_size_log
); i
++) {
261 uptr c
= stats
.by_size_log
[i
];
263 Printf("%zd:%zd; ", i
, c
);

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }
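
  // Rough usage sketch (CountChunk is an illustrative helper, not part of this
  // header): callers typically lock the allocator and then walk it, e.g.
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*static_cast<uptr *>(arg);
  //   }
  //   ...
  //   allocator.ForceLock();
  //   uptr n = 0;
  //   allocator.ForEachChunk(CountChunk, &n);
  //   allocator.ForceUnlock();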

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header *>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }
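
  // Layout implied by the helpers above for the common (alignment <=
  // page_size_) case: the mapping starts with one page holding the Header and
  // metadata, followed by the user data, so GetUser()/GetHeader() just step
  // one page forward/backward and RoundUpMapSize() budgets that extra page.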

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};