/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   "big blocks" freed list.
   "small blocks" freed list
   The blocks with a size >= MC_(clo_freelist_big_blocks)
   are linked in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than VG_(clo_freelist_vol)) without losing
   immediately all protection against dangling pointers.
   position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
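
/* Illustrative note (not part of the original source): the split between
   the two queues is driven by the Memcheck command line options
   --freelist-vol (total volume of freed blocks kept in quarantine) and
   --freelist-big-blocks (size threshold above which a block goes on the
   "big blocks" list).  The default values commonly documented are
   20000000 and 1000000 bytes respectively; treat these numbers as an
   assumption and check mc_main.c / the user manual for the exact
   defaults of the version in use. */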

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released any way : in this case, we
      put it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l]    = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below vg_clo_freelist_vol.
   Start with big block list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallthrough to 1
      case 1: mc->where[0] = 0; // fallthrough to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
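
/* Illustrative client-side sketch (not part of Memcheck itself) of the
   VALGRIND_MALLOCLIKE_BLOCK / VALGRIND_FREELIKE_BLOCK requests that feed
   MC_AllocCustom chunks into MC_(malloc_list).  The allocator functions
   shown are hypothetical; only the two macros are real client requests:

      #include "valgrind/valgrind.h"

      void* my_alloc(size_t szB)
      {
         void* p = my_pool_carve(szB);             // hypothetical allocator
         VALGRIND_MALLOCLIKE_BLOCK(p, szB, 0, 0);  // rzB=0, is_zeroed=0
         return p;
      }

      void my_free(void* p)
      {
         VALGRIND_FREELIKE_BLOCK(p, 0);            // rzB must match
         my_pool_release(p);                       // hypothetical
      }
*/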

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) for an MC_AllocCustom block
      not found in any mempool, as such a block can be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}

ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           if (LIKELY(VG_(clo_xtree_memory)
                                          != Vg_XTMemory_Full))
                                  return;
                               pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance to discover dangling pointer usage,
      even for big blocks being freed by the client. */
}

static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still
      allocated blocks but we are in the middle of freeing it.  To
      report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
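
/* Illustrative client-side sketch (not part of Memcheck itself) of the
   VALGRIND_RESIZEINPLACE_BLOCK request handled above.  The surrounding
   allocator code is hypothetical; only the macro is a real client request:

      #include "valgrind/valgrind.h"

      // Shrink an arena-backed buffer without moving it.
      void shrink_buffer(void* p, size_t oldSzB, size_t newSzB)
      {
         // rzB (last argument) must match the redzone size given when the
         // block was registered with VALGRIND_MALLOCLIKE_BLOCK.
         VALGRIND_RESIZEINPLACE_BLOCK(p, oldSzB, newSzB, 0);
      }
*/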

/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk *mc;
   ThreadId tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->auto_free  = auto_free;
   mp->metapool   = metapool;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We nevertheless mark the redzones noaccess to catch some
         // bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
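
/* Illustrative client-side sketch (not part of Memcheck itself) of the
   mempool client requests that drive MC_(create_mempool),
   MC_(mempool_alloc) and MC_(mempool_free).  The pool structure and
   carve/release helpers are hypothetical; only the macros are real
   client requests from valgrind/valgrind.h:

      #include "valgrind/valgrind.h"

      void pool_init(struct my_pool* pl)
      {
         // rzB=8 redzone bytes around each chunk, is_zeroed=0
         VALGRIND_CREATE_MEMPOOL(pl, 8, 0);
      }

      void* pool_alloc(struct my_pool* pl, size_t szB)
      {
         void* p = my_pool_carve(pl, szB);   // hypothetical
         VALGRIND_MEMPOOL_ALLOC(pl, p, szB);
         return p;
      }

      void pool_free(struct my_pool* pl, void* p)
      {
         VALGRIND_MEMPOOL_FREE(pl, p);
         my_pool_release(pl, p);             // hypothetical
      }

      void pool_done(struct my_pool* pl)
      {
         VALGRIND_DESTROY_MEMPOOL(pl);
      }
*/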

void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool*  mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}

static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}

/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/