2 /*--------------------------------------------------------------------*/
3 /*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4 /*--- mc_malloc_wrappers.c ---*/
5 /*--------------------------------------------------------------------*/
7 /*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
11 Copyright (C) 2000-2017 Julian Seward
12 jseward@acm.org
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_execontext.h"
34 #include "pub_tool_poolalloc.h"
35 #include "pub_tool_hashtable.h"
36 #include "pub_tool_libcbase.h"
37 #include "pub_tool_libcassert.h"
38 #include "pub_tool_libcprint.h"
39 #include "pub_tool_libcproc.h"
40 #include "pub_tool_mallocfree.h"
41 #include "pub_tool_options.h"
42 #include "pub_tool_replacemalloc.h"
43 #include "pub_tool_threadstate.h"
44 #include "pub_tool_tooliface.h" // Needed for mc_include.h
45 #include "pub_tool_stacktrace.h" // For VG_(get_and_pp_StackTrace)
46 #include "pub_tool_xarray.h"
47 #include "pub_tool_xtree.h"
48 #include "pub_tool_xtmemory.h"
50 #include "mc_include.h"
52 /*------------------------------------------------------------*/
53 /*--- Defns ---*/
54 /*------------------------------------------------------------*/
56 /* Stats ... */
57 static SizeT cmalloc_n_mallocs = 0;
58 static SizeT cmalloc_n_frees = 0;
59 static ULong cmalloc_bs_mallocd = 0;
61 /* For debug printing to do with mempools: what stack trace
62 depth to show. */
63 #define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
66 /*------------------------------------------------------------*/
67 /*--- Tracking malloc'd and free'd blocks ---*/
68 /*------------------------------------------------------------*/
70 SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
72 /* Record malloc'd blocks. */
73 VgHashTable *MC_(malloc_list) = NULL;
75 /* Memory pools: a hash table of MC_Mempools. Search key is
76 MC_Mempool::pool. */
77 VgHashTable *MC_(mempool_list) = NULL;
79 /* Pool allocator for MC_Chunk. */
80 PoolAlloc *MC_(chunk_poolalloc) = NULL;
81 static
82 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
83 MC_AllocKind kind);
84 static inline
85 void delete_MC_Chunk (MC_Chunk* mc);
87 /* Records blocks after freeing. */
88 /* Blocks freed by the client are queued in one of two lists of
89 freed blocks not yet physically freed:
90 "big blocks" freed list.
91 "small blocks" freed list
92 The blocks with a size >= MC_(clo_freelist_big_blocks)
93 are linked in the big blocks freed list.
94 This allows a client to allocate and free big blocks
95 (e.g. bigger than MC_(clo_freelist_vol)) without immediately
96 losing all protection against dangling pointers.
97 position [0] is for big blocks, [1] is for small blocks. */
98 static MC_Chunk* freed_list_start[2] = {NULL, NULL};
99 static MC_Chunk* freed_list_end[2] = {NULL, NULL};
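/* Illustrative client-side sketch (not part of this file): the point of
   the freed queue is that code like the following is still reported as
   an invalid access, because the freed block is only quarantined here
   rather than handed straight back to the low-level allocator:

      int* p = malloc(16 * sizeof *p);
      free(p);             // block goes onto the freed queue above
      int x = p[3];        // reported while the block is still queued

   The quarantine is bounded by --freelist-vol=<bytes>; blocks of at
   least --freelist-big-blocks=<bytes> go onto list [0], so a few huge
   frees do not immediately evict all small blocks from list [1]. */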
101 /* Put a shadow chunk on the freed blocks queue, possibly freeing up
102 some of the oldest blocks in the queue at the same time. */
103 static void add_to_freed_queue ( MC_Chunk* mc )
105 const Bool show = False;
106 const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
108 /* Put it at the end of the freed list, unless the block
109 would be directly released anyway: in this case, we
110 put it at the head of the freed list. */
111 if (freed_list_end[l] == NULL) {
112 tl_assert(freed_list_start[l] == NULL);
113 mc->next = NULL;
114 freed_list_end[l] = freed_list_start[l] = mc;
115 } else {
116 tl_assert(freed_list_end[l]->next == NULL);
117 if (mc->szB >= MC_(clo_freelist_vol)) {
118 mc->next = freed_list_start[l];
119 freed_list_start[l] = mc;
120 } else {
121 mc->next = NULL;
122 freed_list_end[l]->next = mc;
123 freed_list_end[l] = mc;
126 VG_(free_queue_volume) += (Long)mc->szB;
127 if (show)
128 VG_(printf)("mc_freelist: acquire: volume now %lld\n",
129 VG_(free_queue_volume));
130 VG_(free_queue_length)++;
133 /* Release enough of the oldest blocks to bring the free queue
134 volume below MC_(clo_freelist_vol).
135 Start with big block list first.
136 On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
137 On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
138 static void release_oldest_block(void)
140 const Bool show = False;
141 int i;
142 tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
143 tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);
145 for (i = 0; i < 2; i++) {
146 while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
147 && freed_list_start[i] != NULL) {
148 MC_Chunk* mc1;
150 tl_assert(freed_list_end[i] != NULL);
152 mc1 = freed_list_start[i];
153 VG_(free_queue_volume) -= (Long)mc1->szB;
154 VG_(free_queue_length)--;
155 if (show)
156 VG_(printf)("mc_freelist: discard: volume now %lld\n",
157 VG_(free_queue_volume));
158 tl_assert(VG_(free_queue_volume) >= 0);
160 if (freed_list_start[i] == freed_list_end[i]) {
161 freed_list_start[i] = freed_list_end[i] = NULL;
162 } else {
163 freed_list_start[i] = mc1->next;
165 mc1->next = NULL; /* just paranoia */
167 /* free MC_Chunk */
168 if (MC_AllocCustom != mc1->allockind)
169 VG_(cli_free) ( (void*)(mc1->data) );
170 delete_MC_Chunk ( mc1 );
175 MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
177 int i;
178 for (i = 0; i < 2; i++) {
179 MC_Chunk* mc;
180 mc = freed_list_start[i];
181 while (mc) {
182 if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
183 MC_(Malloc_Redzone_SzB) ))
184 return mc;
185 mc = mc->next;
188 return NULL;
191 /* Allocate a shadow chunk, put it on the appropriate list.
192 If needed, release oldest blocks from freed list. */
193 static
194 MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
195 MC_AllocKind kind)
197 MC_Chunk* mc = VG_(allocEltPA)(MC_(chunk_poolalloc));
198 mc->data = p;
199 mc->szB = szB;
200 mc->allockind = kind;
201 switch ( MC_(n_where_pointers)() ) {
202 case 2: mc->where[1] = 0; // fall through to case 1
203 case 1: mc->where[0] = 0; // fall through to case 0
204 case 0: break;
205 default: tl_assert(0);
207 MC_(set_allocated_at) (tid, mc);
209 /* Each time a new MC_Chunk is created, release oldest blocks
210 if the free list volume is exceeded. */
211 if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
212 release_oldest_block();
214 /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
215 the mc->data field isn't visible to the leak checker. If memory
216 management is working correctly, any pointer returned by VG_(malloc)
217 should be noaccess as far as the client is concerned. */
218 if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
219 VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
221 return mc;
224 static inline
225 void delete_MC_Chunk (MC_Chunk* mc)
227 VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
230 // True if mc is in the given block list.
231 static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
233 MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
234 if (found_mc) {
235 tl_assert (found_mc->data == mc->data);
236 /* If a user builds a pool from a malloc-ed superblock
237 and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
238 an address at the beginning of this superblock, then
239 this address will appear twice in the block_list.
240 We handle this case by checking size and allockind.
241 Note: I suspect that having the same block
242 twice in MC_(malloc_list) is a recipe for bugs.
243 It might be better to create a "standard" mempool to
244 handle all this more cleanly. */
245 if (found_mc->szB != mc->szB
246 || found_mc->allockind != mc->allockind)
247 return False;
248 tl_assert (found_mc == mc);
249 return True;
250 } else
251 return False;
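/* Illustrative sketch (not part of this file) of the scenario described
   above: a custom allocator carves objects out of a malloc-ed superblock
   and describes them to Memcheck with client requests from
   "valgrind/valgrind.h". The pool_* names below are made up.

      #include <stdlib.h>
      #include "valgrind/valgrind.h"

      static char* super;                       // malloc-ed superblock

      void pool_init(size_t szB) { super = malloc(szB); }

      void* pool_get(size_t off, size_t szB)
      {
         void* p = super + off;
         // Creates an MC_AllocCustom chunk in MC_(malloc_list).  With
         // off == 0 the chunk's address coincides with the superblock
         // itself, which is the "same address twice" case handled above.
         VALGRIND_MALLOCLIKE_BLOCK(p, szB, 0, 0);   // rzB=0, is_zeroed=0
         return p;
      }

      void pool_put(void* p)
      {
         VALGRIND_FREELIKE_BLOCK(p, 0);             // rzB=0
      }
*/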
254 // True if mc is a live block (not yet freed).
255 static Bool live_block (MC_Chunk* mc)
257 if (mc->allockind == MC_AllocCustom) {
258 MC_Mempool* mp;
259 VG_(HT_ResetIter)(MC_(mempool_list));
260 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
261 if ( in_block_list (mp->chunks, mc) )
262 return True;
265 /* Note: we fall back here for an MC_AllocCustom block that was
266 not found, as such a block can be inserted in MC_(malloc_list)
267 by VALGRIND_MALLOCLIKE_BLOCK. */
268 return in_block_list ( MC_(malloc_list), mc );
271 ExeContext* MC_(allocated_at) (MC_Chunk* mc)
273 switch (MC_(clo_keep_stacktraces)) {
274 case KS_none: return VG_(null_ExeContext) ();
275 case KS_alloc: return mc->where[0];
276 case KS_free: return VG_(null_ExeContext) ();
277 case KS_alloc_then_free: return (live_block(mc) ?
278 mc->where[0] : VG_(null_ExeContext) ());
279 case KS_alloc_and_free: return mc->where[0];
280 default: tl_assert (0);
284 ExeContext* MC_(freed_at) (MC_Chunk* mc)
286 switch (MC_(clo_keep_stacktraces)) {
287 case KS_none: return VG_(null_ExeContext) ();
288 case KS_alloc: return VG_(null_ExeContext) ();
289 case KS_free: return (mc->where[0] ?
290 mc->where[0] : VG_(null_ExeContext) ());
291 case KS_alloc_then_free: return (live_block(mc) ?
292 VG_(null_ExeContext) () : mc->where[0]);
293 case KS_alloc_and_free: return (mc->where[1] ?
294 mc->where[1] : VG_(null_ExeContext) ());
295 default: tl_assert (0);
299 void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
301 switch (MC_(clo_keep_stacktraces)) {
302 case KS_none: return;
303 case KS_alloc: break;
304 case KS_free: return;
305 case KS_alloc_then_free: break;
306 case KS_alloc_and_free: break;
307 default: tl_assert (0);
309 mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
310 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
311 VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
314 void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
316 Int pos;
317 ExeContext* ec_free;
319 switch (MC_(clo_keep_stacktraces)) {
320 case KS_none: return;
321 case KS_alloc:
322 if (LIKELY(VG_(clo_xtree_memory)
323 != Vg_XTMemory_Full))
324 return;
325 pos = -1; break;
326 case KS_free: pos = 0; break;
327 case KS_alloc_then_free: pos = 0; break;
328 case KS_alloc_and_free: pos = 1; break;
329 default: tl_assert (0);
331 /* We need the execontext for the free operation, to store it in the
332 mc chunk and/or to use it for full xtree memory profiling.
333 Note: we are guaranteed to find the ec_alloc in mc->where[0], as
334 mc_post_clo_init verifies the consistency of --xtree-memory and
335 --keep-stacktraces. */
336 ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
337 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
338 VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
339 if (LIKELY(pos >= 0))
340 mc->where[pos] = ec_free;
343 UInt MC_(n_where_pointers) (void)
345 switch (MC_(clo_keep_stacktraces)) {
346 case KS_none: return 0;
347 case KS_alloc:
348 case KS_free:
349 case KS_alloc_then_free: return 1;
350 case KS_alloc_and_free: return 2;
351 default: tl_assert (0);
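/* For reference (a summary of the switches above, not new behaviour):
   the KS_* values correspond to the client option
   --keep-stacktraces=alloc|free|alloc-and-free|alloc-then-free|none,
   e.g.

      valgrind --tool=memcheck --keep-stacktraces=alloc-and-free ./prog

   Only alloc-and-free needs two where[] slots (separate allocation and
   free stacks); alloc-then-free reuses where[0], so once a block is
   freed its allocation stack is replaced by the free stack. */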
355 /*------------------------------------------------------------*/
356 /*--- client_malloc(), etc ---*/
357 /*------------------------------------------------------------*/
359 /* Allocate memory and note change in memory available */
360 void* MC_(new_block) ( ThreadId tid,
361 Addr p, SizeT szB, SizeT alignB,
362 Bool is_zeroed, MC_AllocKind kind,
363 VgHashTable *table)
365 MC_Chunk* mc;
367 // Allocate and zero if necessary
368 if (p) {
369 tl_assert(MC_AllocCustom == kind);
370 } else {
371 tl_assert(MC_AllocCustom != kind);
372 p = (Addr)VG_(cli_malloc)( alignB, szB );
373 if (!p) {
374 return NULL;
376 if (is_zeroed) {
377 VG_(memset)((void*)p, 0, szB);
378 } else
379 if (MC_(clo_malloc_fill) != -1) {
380 tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
381 VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
385 // Only update stats if allocation succeeded.
386 cmalloc_n_mallocs ++;
387 cmalloc_bs_mallocd += (ULong)szB;
388 mc = create_MC_Chunk (tid, p, szB, kind);
389 VG_(HT_add_node)( table, mc );
391 if (is_zeroed)
392 MC_(make_mem_defined)( p, szB );
393 else {
394 UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
395 tl_assert(VG_(is_plausible_ECU)(ecu));
396 MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
399 return (void*)p;
402 void* MC_(malloc) ( ThreadId tid, SizeT n )
404 if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
405 return NULL;
406 } else {
407 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
408 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
412 void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
414 if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
415 return NULL;
416 } else {
417 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
418 /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
422 void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
424 if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
425 return NULL;
426 } else {
427 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
428 /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
432 void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
434 if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
435 return NULL;
436 } else {
437 return MC_(new_block) ( tid, 0, n, alignB,
438 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
442 void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
444 if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
445 MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
446 return NULL;
447 } else {
448 return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
449 /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
453 static
454 void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
456 /* Note: we do not free-fill the custom allocs produced
457 by MEMPOOL or MALLOC/FREELIKE_BLOCK requests. */
458 if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
459 tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
460 VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
463 /* Note: make redzones noaccess again -- just in case user made them
464 accessible with a client request... */
465 MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
467 /* Record where freed */
468 MC_(set_freed_at) (tid, mc);
469 /* Put it out of harm's way for a while */
470 add_to_freed_queue ( mc );
471 /* If the free list volume is bigger than MC_(clo_freelist_vol),
472 we wait till the next block allocation to release blocks.
473 This increases the chance of discovering dangling pointer usage,
474 even for big blocks being freed by the client. */
478 static
479 void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
481 /* Only show such an error if the user hasn't disabled doing so. */
482 if (!MC_(clo_show_mismatched_frees))
483 return;
485 /* MC_(record_freemismatch_error) reports errors for still-allocated
486 blocks, but we are in the middle of freeing this one. To
487 report the error correctly, we re-insert the chunk (making it
488 again a "clean allocated block"), report the error, and then
489 re-remove the chunk. This avoids doing a VG_(HT_lookup)
490 followed by a VG_(HT_remove) in all "non-erroneous cases". */
491 VG_(HT_add_node)( MC_(malloc_list), mc );
492 MC_(record_freemismatch_error) ( tid, mc );
493 if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
494 tl_assert(0);
497 void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
499 MC_Chunk* mc;
501 cmalloc_n_frees++;
503 mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
504 if (mc == NULL) {
505 MC_(record_free_error) ( tid, p );
506 } else {
507 /* check if it is a matching free() / delete / delete [] */
508 if (kind != mc->allockind) {
509 tl_assert(p == mc->data);
510 record_freemismatch_error ( tid, mc );
512 die_and_free_mem ( tid, mc, rzB );
516 void MC_(free) ( ThreadId tid, void* p )
518 MC_(handle_free)(
519 tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
522 void MC_(__builtin_delete) ( ThreadId tid, void* p )
524 MC_(handle_free)(
525 tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
528 void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
530 MC_(handle_free)(
531 tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
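/* Illustrative sketch (not part of this file): the wrappers above record
   the allocator family in mc->allockind, which is what lets
   MC_(handle_free) detect a mismatched deallocation in C++ client code,
   e.g.

      int* p = new int[10];   // recorded as MC_AllocNewVec
      free(p);                // "Mismatched free() / delete / delete []"

   Reporting of this error class can be disabled with
   --show-mismatched-frees=no (see record_freemismatch_error above). */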
534 void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
536 MC_Chunk* old_mc;
537 MC_Chunk* new_mc;
538 Addr a_new;
539 SizeT old_szB;
541 if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
542 return NULL;
544 cmalloc_n_frees ++;
545 cmalloc_n_mallocs ++;
546 cmalloc_bs_mallocd += (ULong)new_szB;
548 /* Remove the old block */
549 old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
550 if (old_mc == NULL) {
551 MC_(record_free_error) ( tid, (Addr)p_old );
552 /* We return to the program regardless. */
553 return NULL;
556 /* check if it is a matching free() / delete / delete [] */
557 if (MC_AllocMalloc != old_mc->allockind) {
558 /* cannot realloc a range that was allocated with new or new [] */
559 tl_assert((Addr)p_old == old_mc->data);
560 record_freemismatch_error ( tid, old_mc );
561 /* but keep going anyway */
564 old_szB = old_mc->szB;
566 /* Get new memory */
567 a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);
569 if (a_new) {
570 /* In all cases, even when the new size is smaller or unchanged, we
571 reallocate and copy the contents, and make the old block
572 inaccessible. This is so as to guarantee to catch all cases of
573 accesses via the old address after reallocation, regardless of
574 the change in size. (Of course the ability to detect accesses
575 to the old block also depends on the size of the freed blocks
576 queue). */
578 // Allocate a new chunk.
579 new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );
581 // Now insert the new mc (with a new 'data' field) into malloc_list.
582 VG_(HT_add_node)( MC_(malloc_list), new_mc );
584 /* Retained part is copied, red zones set as normal */
586 /* Redzone at the front */
587 MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
588 MC_(Malloc_Redzone_SzB) );
590 /* payload */
591 if (old_szB >= new_szB) {
592 /* new size is smaller or the same */
594 /* Copy address range state and value from old to new */
595 MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
596 VG_(memcpy)((void*)a_new, p_old, new_szB);
597 } else {
598 /* new size is bigger */
599 UInt ecu;
601 /* Copy address range state and value from old to new */
602 MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
603 VG_(memcpy)((void*)a_new, p_old, old_szB);
605 // If the block has grown, we mark the grown area as undefined.
606 // We have to do that after VG_(HT_add_node) to ensure the ecu
607 // execontext is for a fully allocated block.
608 ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
609 tl_assert(VG_(is_plausible_ECU)(ecu));
610 MC_(make_mem_undefined_w_otag)( a_new+old_szB,
611 new_szB-old_szB,
612 ecu | MC_OKIND_HEAP );
614 /* Possibly fill new area with specified junk */
615 if (MC_(clo_malloc_fill) != -1) {
616 tl_assert(MC_(clo_malloc_fill) >= 0x00
617 && MC_(clo_malloc_fill) <= 0xFF);
618 VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
619 new_szB-old_szB);
623 /* Redzone at the back. */
624 MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));
626 /* Possibly fill freed area with specified junk. */
627 if (MC_(clo_free_fill) != -1) {
628 tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
629 VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
632 /* Free old memory */
633 /* Nb: we have to allocate a new MC_Chunk for the new memory rather
634 than recycling the old one, so that any erroneous accesses to the
635 old memory are reported. */
636 die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );
638 } else {
639 /* Could not allocate new client memory.
640 Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
641 unconditionally removed at the beginning of the function. */
642 VG_(HT_add_node)( MC_(malloc_list), old_mc );
645 return (void*)a_new;
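/* Illustrative sketch (not part of this file): because the code above
   always moves the payload to freshly allocated client memory, even for
   a shrink, stale uses of the pre-realloc pointer remain detectable:

      char* p = malloc(64);
      char* q = realloc(p, 128);   // old block joins the freed queue
      p[0] = 'x';                  // reported as an invalid write while
                                   // the old block is still quarantined
      free(q);
*/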
648 SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
650 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
652 // There may be slop, but pretend there isn't because only the asked-for
653 // area will be marked as addressable.
654 return ( mc ? mc->szB : 0 );
657 /* This handles the in place resize of a block, as performed by the
658 VALGRIND_RESIZEINPLACE_BLOCK client request. It is unrelated to,
659 and not used for, handling of the normal libc realloc()
660 function. */
661 void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
662 SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
664 MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
665 if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
666 /* Reject if: p is not found, or oldSizeB is wrong,
667 or new block would be empty. */
668 MC_(record_free_error) ( tid, p );
669 return;
672 if (oldSizeB == newSizeB)
673 return;
675 if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
676 VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);
678 mc->szB = newSizeB;
679 if (newSizeB < oldSizeB) {
680 MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
681 } else {
682 ExeContext* ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
683 UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
684 MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
685 ecu | MC_OKIND_HEAP );
686 if (rzB > 0)
687 MC_(make_mem_noaccess)( p + newSizeB, rzB );
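/* Illustrative sketch (not part of this file): this function backs the
   VALGRIND_RESIZEINPLACE_BLOCK client request from "valgrind/valgrind.h",
   used by allocators that grow or shrink a block without moving it
   (buf, oldSizeB and newSizeB below are made-up client-side values):

      VALGRIND_RESIZEINPLACE_BLOCK(buf, oldSizeB, newSizeB, 0);  // rzB=0

   Shrinking marks the dropped tail noaccess; growing marks the added
   tail undefined, exactly as the code above does. */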
692 /*------------------------------------------------------------*/
693 /*--- Memory pool stuff. ---*/
694 /*------------------------------------------------------------*/
696 /* Set to 1 for intensive sanity checking. Is very expensive though
697 and should not be used in production scenarios. See #255966. */
698 #define MP_DETAILED_SANITY_CHECKS 0
700 static void check_mempool_sane(MC_Mempool* mp); /*forward*/
702 static void free_mallocs_in_mempool_block (MC_Mempool* mp,
703 Addr StartAddr,
704 Addr EndAddr)
706 MC_Chunk *mc;
707 ThreadId tid;
709 tl_assert(mp->auto_free);
711 if (VG_(clo_verbosity) > 2) {
712 VG_(message)(Vg_UserMsg,
713 "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
714 StartAddr, (SizeT) (EndAddr - StartAddr));
717 tid = VG_(get_running_tid)();
719 VG_(HT_ResetIter)(MC_(malloc_list));
720 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
721 if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
722 if (VG_(clo_verbosity) > 2) {
723 VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
724 mc->data, (mc->szB + 0UL));
727 VG_(HT_remove_at_Iter)(MC_(malloc_list));
728 die_and_free_mem(tid, mc, mp->rzB);
733 void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
734 Bool auto_free, Bool metapool)
736 MC_Mempool* mp;
738 if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
739 VG_(message)(Vg_UserMsg,
740 "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
741 " autofree=%d, metapool=%d)\n",
742 pool, rzB, is_zeroed,
743 auto_free, metapool);
744 VG_(get_and_pp_StackTrace)
745 (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
746 if (auto_free && !metapool)
747 VG_(tool_panic)("Inappropriate use of mempool:"
748 " an auto free pool must be a meta pool. Aborting\n");
751 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
752 if (mp != NULL) {
753 VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
756 mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
757 mp->pool = pool;
758 mp->rzB = rzB;
759 mp->is_zeroed = is_zeroed;
760 mp->auto_free = auto_free;
761 mp->metapool = metapool;
762 mp->chunks = VG_(HT_construct)( "MC_(create_mempool)" );
763 check_mempool_sane(mp);
765 /* Paranoia ... ensure this area is off-limits to the client, so
766 the mempool's fields aren't visible to the leak checker. If memory
767 management is working correctly, any pointer returned by
768 VG_(malloc) should be noaccess as far as the client is
769 concerned. */
770 if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
771 VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
774 VG_(HT_add_node)( MC_(mempool_list), mp );
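/* Illustrative sketch (not part of this file) of the client requests
   that drive the mempool machinery (the VALGRIND_* pool macros come from
   "valgrind/valgrind.h"; pool_storage, obj and pool_demo are made up):

      static char pool_storage[4096];                  // the superblock

      void pool_demo(void)
      {
         VALGRIND_CREATE_MEMPOOL(pool_storage, 8, 0);  // rzB=8, is_zeroed=0
         void* obj = pool_storage + 64;
         VALGRIND_MEMPOOL_ALLOC(pool_storage, obj, 128);  // MC_(mempool_alloc)
         // ... use obj ...
         VALGRIND_MEMPOOL_FREE(pool_storage, obj);        // MC_(mempool_free)
         VALGRIND_DESTROY_MEMPOOL(pool_storage);          // MC_(destroy_mempool)
      }

   Recent valgrind.h versions also provide VALGRIND_CREATE_MEMPOOL_EXT
   with VALGRIND_MEMPOOL_METAPOOL and VALGRIND_MEMPOOL_AUTO_FREE flags,
   which is what sets the metapool/auto_free fields checked above (verify
   against the installed header). */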
777 void MC_(destroy_mempool)(Addr pool)
779 MC_Chunk* mc;
780 MC_Mempool* mp;
782 if (VG_(clo_verbosity) > 2) {
783 VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
784 VG_(get_and_pp_StackTrace)
785 (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
788 mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );
790 if (mp == NULL) {
791 ThreadId tid = VG_(get_running_tid)();
792 MC_(record_illegal_mempool_error) ( tid, pool );
793 return;
795 check_mempool_sane(mp);
797 // Clean up the chunks, one by one
798 VG_(HT_ResetIter)(mp->chunks);
799 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
800 /* Note: make redzones noaccess again -- just in case user made them
801 accessible with a client request... */
802 MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
804 // Destroy the chunk table
805 VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);
807 VG_(free)(mp);
810 static Int
811 mp_compar(const void* n1, const void* n2)
813 const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
814 const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
815 if (mc1->data < mc2->data) return -1;
816 if (mc1->data > mc2->data) return 1;
817 return 0;
820 static void
821 check_mempool_sane(MC_Mempool* mp)
823 UInt n_chunks, i, bad = 0;
824 static UInt tick = 0;
826 MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
827 if (!chunks)
828 return;
830 if (VG_(clo_verbosity) > 1) {
831 if (tick++ >= 10000)
833 UInt total_pools = 0, total_chunks = 0;
834 MC_Mempool* mp2;
836 VG_(HT_ResetIter)(MC_(mempool_list));
837 while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
838 total_pools++;
839 VG_(HT_ResetIter)(mp2->chunks);
840 while (VG_(HT_Next)(mp2->chunks)) {
841 total_chunks++;
845 VG_(message)(Vg_UserMsg,
846 "Total mempools active: %u pools, %u chunks\n",
847 total_pools, total_chunks);
848 tick = 0;
853 VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);
855 /* Sanity check; assert that the blocks are now in order */
856 for (i = 0; i < n_chunks-1; i++) {
857 if (chunks[i]->data > chunks[i+1]->data) {
858 VG_(message)(Vg_UserMsg,
859 "Mempool chunk %u / %u is out of order "
860 "wrt. its successor\n",
861 i+1, n_chunks);
862 bad = 1;
866 /* Sanity check -- make sure they don't overlap */
867 for (i = 0; i < n_chunks-1; i++) {
868 if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
869 VG_(message)(Vg_UserMsg,
870 "Mempool chunk %u / %u overlaps with its successor\n",
871 i+1, n_chunks);
872 bad = 1;
876 if (bad) {
877 VG_(message)(Vg_UserMsg,
878 "Bad mempool (%u chunks), dumping chunks for inspection:\n",
879 n_chunks);
880 for (i = 0; i < n_chunks; ++i) {
881 VG_(message)(Vg_UserMsg,
882 "Mempool chunk %u / %u: %lu bytes "
883 "[%lx,%lx), allocated:\n",
884 i+1,
885 n_chunks,
886 chunks[i]->szB + 0UL,
887 chunks[i]->data,
888 chunks[i]->data + chunks[i]->szB);
890 VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
893 VG_(free)(chunks);
896 void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
898 MC_Mempool* mp;
900 if (VG_(clo_verbosity) > 2) {
901 VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
902 pool, addr, szB);
903 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
906 mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
907 if (mp == NULL) {
908 MC_(record_illegal_mempool_error) ( tid, pool );
909 } else {
910 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
911 MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
912 MC_AllocCustom, mp->chunks);
913 if (mp->rzB > 0) {
914 // This is not needed if the user application has properly
915 // marked the superblock noaccess when defining the mempool.
916 // We nevertheless still mark the redzones noaccess, to catch
917 // some bugs if the user forgot.
918 MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
919 MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
921 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
925 void MC_(mempool_free)(Addr pool, Addr addr)
927 MC_Mempool* mp;
928 MC_Chunk* mc;
929 ThreadId tid = VG_(get_running_tid)();
931 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
932 if (mp == NULL) {
933 MC_(record_illegal_mempool_error)(tid, pool);
934 return;
937 if (VG_(clo_verbosity) > 2) {
938 VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
939 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
942 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
943 mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
944 if (mc == NULL) {
945 MC_(record_free_error)(tid, (Addr)addr);
946 return;
949 if (mp->auto_free) {
950 free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
953 if (VG_(clo_verbosity) > 2) {
954 VG_(message)(Vg_UserMsg,
955 "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
956 pool, addr, mc->szB + 0UL);
959 die_and_free_mem ( tid, mc, mp->rzB );
960 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
964 void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
966 MC_Mempool* mp;
967 MC_Chunk* mc;
968 ThreadId tid = VG_(get_running_tid)();
969 UInt n_shadows, i;
970 VgHashNode** chunks;
972 if (VG_(clo_verbosity) > 2) {
973 VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
974 pool, addr, szB);
975 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
978 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
979 if (mp == NULL) {
980 MC_(record_illegal_mempool_error)(tid, pool);
981 return;
984 check_mempool_sane(mp);
985 chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
986 if (n_shadows == 0) {
987 tl_assert(chunks == NULL);
988 return;
991 tl_assert(chunks != NULL);
992 for (i = 0; i < n_shadows; ++i) {
994 Addr lo, hi, min, max;
996 mc = (MC_Chunk*) chunks[i];
998 lo = mc->data;
999 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
1001 #define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
1003 if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
1005 /* The current chunk is entirely within the trim extent: keep
1006 it. */
1008 continue;
1010 } else if ( (! EXTENT_CONTAINS(lo)) &&
1011 (! EXTENT_CONTAINS(hi)) ) {
1013 /* The current chunk is entirely outside the trim extent:
1014 delete it. */
1016 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1017 MC_(record_free_error)(tid, (Addr)mc->data);
1018 VG_(free)(chunks);
1019 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
1020 return;
1022 die_and_free_mem ( tid, mc, mp->rzB );
1024 } else {
1026 /* The current chunk intersects the trim extent: remove,
1027 trim, and reinsert it. */
1029 tl_assert(EXTENT_CONTAINS(lo) ||
1030 EXTENT_CONTAINS(hi));
1031 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
1032 MC_(record_free_error)(tid, (Addr)mc->data);
1033 VG_(free)(chunks);
1034 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
1035 return;
1038 if (mc->data < addr) {
1039 min = mc->data;
1040 lo = addr;
1041 } else {
1042 min = addr;
1043 lo = mc->data;
1046 if (mc->data + mc->szB > addr + szB) {
1047 max = mc->data + mc->szB;
1048 hi = addr + szB;
1049 } else {
1050 max = addr + szB;
1051 hi = mc->data + mc->szB;
1054 tl_assert(min <= lo);
1055 tl_assert(lo < hi);
1056 tl_assert(hi <= max);
1058 if (min < lo && !EXTENT_CONTAINS(min)) {
1059 MC_(make_mem_noaccess)( min, lo - min);
1062 if (hi < max && !EXTENT_CONTAINS(max)) {
1063 MC_(make_mem_noaccess)( hi, max - hi );
1066 mc->data = lo;
1067 mc->szB = (UInt) (hi - lo);
1068 VG_(HT_add_node)( mp->chunks, mc );
1071 #undef EXTENT_CONTAINS
1074 check_mempool_sane(mp);
1075 VG_(free)(chunks);
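/* Illustrative note (not part of this file): this implements
   VALGRIND_MEMPOOL_TRIM(pool, addr, size), which declares that only
   [addr, addr+size) of the pool remains valid: chunks entirely inside
   the range are kept, chunks entirely outside are treated as freed, and
   straddling chunks are clipped to the range, e.g. (reusing the
   hypothetical pool_storage from the sketch further up)

      VALGRIND_MEMPOOL_TRIM(pool_storage, pool_storage, 2048);
*/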
1078 void MC_(move_mempool)(Addr poolA, Addr poolB)
1080 MC_Mempool* mp;
1082 if (VG_(clo_verbosity) > 2) {
1083 VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
1084 VG_(get_and_pp_StackTrace)
1085 (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1088 mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
1090 if (mp == NULL) {
1091 ThreadId tid = VG_(get_running_tid)();
1092 MC_(record_illegal_mempool_error) ( tid, poolA );
1093 return;
1096 mp->pool = poolB;
1097 VG_(HT_add_node)( MC_(mempool_list), mp );
1100 void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
1102 MC_Mempool* mp;
1103 MC_Chunk* mc;
1104 ThreadId tid = VG_(get_running_tid)();
1106 if (VG_(clo_verbosity) > 2) {
1107 VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
1108 pool, addrA, addrB, szB);
1109 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
1112 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1113 if (mp == NULL) {
1114 MC_(record_illegal_mempool_error)(tid, pool);
1115 return;
1118 check_mempool_sane(mp);
1120 mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
1121 if (mc == NULL) {
1122 MC_(record_free_error)(tid, (Addr)addrA);
1123 return;
1126 mc->data = addrB;
1127 mc->szB = szB;
1128 VG_(HT_add_node)( mp->chunks, mc );
1130 check_mempool_sane(mp);
1133 Bool MC_(mempool_exists)(Addr pool)
1135 MC_Mempool* mp;
1137 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
1138 if (mp == NULL) {
1139 return False;
1141 return True;
1144 static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
1146 MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
1147 if (mc) {
1148 xta->nbytes = mc->szB;
1149 xta->nblocks = 1;
1150 *ec_alloc = MC_(allocated_at)(mc);
1151 } else
1152 xta->nblocks = 0;
1155 void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
1157 // Make xtmemory_report_next_block ready to be called.
1158 VG_(HT_ResetIter)(MC_(malloc_list));
1160 VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
1161 VG_(XT_filter_1top_and_maybe_below_main));
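/* Illustrative note (not part of this file): this report is produced
   when heap profiling is requested, e.g.

      valgrind --tool=memcheck --xtree-memory=full ./prog

   optionally with --xtree-memory-file=<file> to pick the output file.
   xtmemory_report_next_block above feeds the live blocks one at a time
   into the generic VG_(XTMemory_report) machinery. */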
1164 /*------------------------------------------------------------*/
1165 /*--- Statistics printing ---*/
1166 /*------------------------------------------------------------*/
1168 void MC_(print_malloc_stats) ( void )
1170 MC_Chunk* mc;
1171 SizeT nblocks = 0;
1172 ULong nbytes = 0;
1174 if (VG_(clo_verbosity) == 0)
1175 return;
1176 if (VG_(clo_xml))
1177 return;
1179 /* Count memory still in use. */
1180 VG_(HT_ResetIter)(MC_(malloc_list));
1181 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1182 nblocks++;
1183 nbytes += (ULong)mc->szB;
1186 VG_(umsg)(
1187 "HEAP SUMMARY:\n"
1188 " in use at exit: %'llu bytes in %'lu blocks\n"
1189 " total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
1190 "\n",
1191 nbytes, nblocks,
1192 cmalloc_n_mallocs,
1193 cmalloc_n_frees, cmalloc_bs_mallocd
1197 SizeT MC_(get_cmalloc_n_frees) ( void )
1199 return cmalloc_n_frees;
1203 /*--------------------------------------------------------------------*/
1204 /*--- end ---*/
1205 /*--------------------------------------------------------------------*/