//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PREFIX
#error "Define SCUDO_PREFIX prior to including this file!"
#endif

// malloc-type functions have to be aligned to std::max_align_t. This is
// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
// do not have to abide by the same requirement.
#ifndef SCUDO_MALLOC_ALIGNMENT
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
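
// Helpers for the optional allocation hooks: when hook support is compiled in
// (SCUDO_ENABLE_HOOKS) and the corresponding weak hook is defined, successful
// allocations and all deallocations are forwarded to it.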
static void reportAllocation(void *ptr, size_t size) {
  if (SCUDO_ENABLE_HOOKS)
    if (__scudo_allocate_hook && ptr)
      __scudo_allocate_hook(ptr, size);
}
static void reportDeallocation(void *ptr) {
  if (SCUDO_ENABLE_HOOKS)
    if (__scudo_deallocate_hook)
      __scudo_deallocate_hook(ptr);
}

extern "C" {
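
// C wrappers for the standard allocation entry points; each one routes through
// SCUDO_ALLOCATOR and notifies the hooks above.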
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
  scudo::uptr Product;
  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportCallocOverflow(nmemb, size);
  }
  void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
                                       SCUDO_MALLOC_ALIGNMENT, true);
  reportAllocation(Ptr, Product);
  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
  reportDeallocation(ptr);
  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}
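
// mallinfo()/mallinfo2() are filled from Scudo's global stat counters; only
// the fields that have a counterpart in those stats are populated, the rest
// are left at zero.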
INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
  struct SCUDO_MALLINFO Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
  // Total allocated space (bytes)
  Info.uordblks =
      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}

// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
#if !SCUDO_ANDROID
INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
  struct __scudo_mallinfo2 Info = {};
  scudo::StatCounters Stats;
  SCUDO_ALLOCATOR.getStats(Stats);
  // Space allocated in mmapped regions (bytes)
  Info.hblkhd = Stats[scudo::StatMapped];
  // Maximum total allocated space (bytes)
  Info.usmblks = Info.hblkhd;
  // Space in freed fastbin blocks (bytes)
  Info.fsmblks = Stats[scudo::StatFree];
  // Total allocated space (bytes)
  Info.uordblks = Stats[scudo::StatAllocated];
  // Total free space (bytes)
  Info.fordblks = Info.fsmblks;
  return Info;
}
#endif

INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
                                       SCUDO_MALLOC_ALIGNMENT);
  reportAllocation(Ptr, size);
  return scudo::setErrnoOnNull(Ptr);
}

#if SCUDO_ANDROID
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
#else
INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
#endif
  return SCUDO_ALLOCATOR.getUsableSize(ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
  // Android rounds up the alignment to a power of two if it isn't one.
  if (SCUDO_ANDROID) {
    if (UNLIKELY(!alignment)) {
      alignment = 1U;
    } else {
      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
        alignment = scudo::roundUpPowerOfTwo(alignment);
    }
  } else {
    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
      if (SCUDO_ALLOCATOR.canReturnNull()) {
        errno = EINVAL;
        return nullptr;
      }
      scudo::reportAlignmentNotPowerOfTwo(alignment);
    }
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  reportAllocation(Ptr, size);
  return Ptr;
}

INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
                                                size_t size) {
  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
    if (!SCUDO_ALLOCATOR.canReturnNull())
      scudo::reportInvalidPosixMemalignAlignment(alignment);
    return EINVAL;
  }
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  reportAllocation(Ptr, size);

  *memptr = Ptr;
  return 0;
}

INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = ENOMEM;
      return nullptr;
    }
    scudo::reportPvallocOverflow(size);
  }
  // pvalloc(0) should allocate one page.
  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
                               scudo::Chunk::Origin::Memalign, PageSize);
  reportAllocation(Ptr, scudo::roundUp(size, PageSize));

  return scudo::setErrnoOnNull(Ptr);
}

INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
  if (!ptr) {
    void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
                                         SCUDO_MALLOC_ALIGNMENT);
    reportAllocation(Ptr, size);
    return scudo::setErrnoOnNull(Ptr);
  }
  if (size == 0) {
    reportDeallocation(ptr);
    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
    return nullptr;
  }

  void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
  if (NewPtr != nullptr) {
    reportAllocation(NewPtr, size);
    reportDeallocation(ptr);
  }

  return scudo::setErrnoOnNull(NewPtr);
}

INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
  void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
                                       scudo::getPageSizeCached());
  reportAllocation(Ptr, size);

  return scudo::setErrnoOnNull(Ptr);
}
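
// Invokes `callback` on every allocated chunk that falls within
// [base, base + size). The allocator is expected to be paused around the walk
// (see malloc_disable()/malloc_enable() below), as malloc_info() does.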
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
    uintptr_t base, size_t size,
    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
  return 0;
}
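
// malloc_disable() pauses the allocator (and malloc_enable() resumes it) so
// that the heap can be safely walked or carried across a fork(); they are
// installed as fork handlers in malloc_postinit() below.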
INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }

INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
  SCUDO_ALLOCATOR.disable();
}
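
// Runs after the allocator is initialized: sets up GWP-ASan and registers the
// fork handlers that pause and resume the allocator around fork().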
void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}
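
// Handles the subset of Android's extended mallopt() parameters that Scudo
// understands (M_DECAY_TIME, M_PURGE, M_PURGE_ALL, M_LOG_STATS, plus the M_*
// values forwarded to setOption() below); returns 1 on success, 0 otherwise.
// For example, a caller may release free memory back to the OS with:
//   mallopt(M_PURGE, 0);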
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
  if (param == M_DECAY_TIME) {
    if (SCUDO_ANDROID) {
      if (value == 0) {
        // Will set the release values to their minimum values.
        value = INT32_MIN;
      } else {
        // Will set the release values to their maximum values.
        value = INT32_MAX;
      }
    }

    SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
                              static_cast<scudo::sptr>(value));
    return 1;
  } else if (param == M_PURGE) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
    return 1;
  } else if (param == M_PURGE_ALL) {
    SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
    return 1;
  } else if (param == M_LOG_STATS) {
    SCUDO_ALLOCATOR.printStats();
    SCUDO_ALLOCATOR.printFragmentationInfo();
    return 1;
  } else {
    scudo::Option option;
    switch (param) {
    case M_MEMTAG_TUNING:
      option = scudo::Option::MemtagTuning;
      break;
    case M_THREAD_DISABLE_MEM_INIT:
      option = scudo::Option::ThreadDisableMemInit;
      break;
    case M_CACHE_COUNT_MAX:
      option = scudo::Option::MaxCacheEntriesCount;
      break;
    case M_CACHE_SIZE_MAX:
      option = scudo::Option::MaxCacheEntrySize;
      break;
    case M_TSDS_COUNT_MAX:
      option = scudo::Option::MaxTSDsCount;
      break;
    default:
      return 0;
    }
    return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
  }
}

INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
                                                 size_t size) {
  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
    if (SCUDO_ALLOCATOR.canReturnNull()) {
      errno = EINVAL;
      return nullptr;
    }
    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
  }

  void *Ptr =
      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
  reportAllocation(Ptr, size);

  return scudo::setErrnoOnNull(Ptr);
}
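
// Writes a small XML report to `stream`: the number of live allocations for
// each size up to the primary allocator's largest size class.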
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
  const scudo::uptr max_size =
      decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
  auto *sizes = static_cast<scudo::uptr *>(
      SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
  auto callback = [](uintptr_t, size_t size, void *arg) {
    auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
    if (size < max_size)
      sizes[size]++;
  };

  SCUDO_ALLOCATOR.disable();
  SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
  SCUDO_ALLOCATOR.enable();

  fputs("<malloc version=\"scudo-1\">\n", stream);
  for (scudo::uptr i = 0; i != max_size; ++i)
    if (sizes[i])
      fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
  fputs("</malloc>\n", stream);
  SCUDO_PREFIX(free)(sizes);
  return 0;
}

// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  SCUDO_ALLOCATOR.disableMemoryTagging();
}

// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}

// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
  SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                : scudo::NoFill);
}

// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
  SCUDO_ALLOCATOR.setFillContents(
      pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}

// Sets whether scudo adds a small amount of slack at the end of large
// allocations, before the guard page. This can be enabled to work around buggy
// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}

} // extern "C"