/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* Stats: allocation counters maintained by the wrappers below. */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;

static MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                                   MC_AllocKind kind );
static void delete_MC_Chunk ( MC_Chunk* mc );

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list,
   - the "small blocks" freed list.
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked in
   the big blocks freed list.  This allows a client to allocate and
   free big blocks (e.g. bigger than VG_(clo_freelist_vol)) without
   immediately losing all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int  l    = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      directly released anyway: in this case, we put it at the head
      of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
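
/* Illustrative note (not part of the original file): the two freed lists
   are driven by two Memcheck command-line options.  A minimal sketch of
   how a user would tune them:

      valgrind --tool=memcheck --freelist-vol=20000000 \
               --freelist-big-blocks=1000000 ./a.out

   Blocks of size >= --freelist-big-blocks go on list [0]; everything else
   goes on list [1].  Keeping big blocks on their own list means a handful
   of huge frees cannot flush all the small recently-freed blocks (and the
   dangling-pointer protection they provide) out of the queue at once. */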
/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;

   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* Release the client memory and the shadow chunk itself. */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}
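
/* Context note (illustrative, not in the original file): this lookup is
   what lets Memcheck describe a bad address as, e.g., "16 bytes inside a
   block of size 64 free'd" -- the address-describing code asks whether
   the address falls inside (or in the redzones of) any block still
   sitting in the freed queue. */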
/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                                   MC_AllocKind kind )
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fall through to case 1
      case 1: mc->where[0] = 0; // fall through to case 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static void delete_MC_Chunk ( MC_Chunk* mc )
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}
// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) for an MC_AllocCustom block
      not found in any mempool, as such a block can also be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}
ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}
void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:
         if (LIKELY(VG_(clo_xtree_memory)
                    != Vg_XTMemory_Full))
            return;
         pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
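
/* Summary note (added for clarity, not in the original file): the number
   of ExeContext slots kept per MC_Chunk, and what they hold, as implied
   by the three functions above:
      --keep-stacktraces=none            -> 0 slots
      --keep-stacktraces=alloc           -> 1 slot  (where[0] = alloc stack)
      --keep-stacktraces=free            -> 1 slot  (where[0] = free stack)
      --keep-stacktraces=alloc-then-free -> 1 slot  (alloc stack, overwritten
                                                     by the free stack)
      --keep-stacktraces=alloc-and-free  -> 2 slots (where[0] = alloc,
                                                     where[1] = free) */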
/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
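
/* Illustrative sketch (not part of the original file): how MC_(new_block)
   is reached with a client-supplied address (kind == MC_AllocCustom),
   assuming the standard client-request macros from valgrind/valgrind.h and
   a hypothetical lower-level allocator my_raw_alloc():

      #include "valgrind/valgrind.h"

      void* my_alloc(size_t n)
      {
         void* p = my_raw_alloc(n);             // hypothetical helper
         // Tell Memcheck about the block: redzone 0, not zeroed.
         VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);
         return p;
      }

   The request is dispatched by the tool's client-request handler, which
   ends up calling MC_(new_block)(tid, (Addr)p, n, 0, False, MC_AllocCustom,
   MC_(malloc_list)).  For normal malloc/new the wrappers below pass p == 0
   and VG_(cli_malloc) supplies the address instead. */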
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
                              /*is_zeroed*/False, MC_AllocMalloc,
                              MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
                              /*is_zeroed*/False, MC_AllocNew,
                              MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
                              /*is_zeroed*/False, MC_AllocNewVec,
                              MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
                              /*is_zeroed*/False, MC_AllocMalloc,
                              MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
                              /*is_zeroed*/True, MC_AllocMalloc,
                              MC_(malloc_list));
   }
}
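
/* Context note (illustrative, not in the original file): these wrappers
   are not called directly by the client; they are registered with
   Valgrind's malloc-replacement machinery from the tool's pre_clo_init,
   roughly along the lines of

      VG_(needs_malloc_replacement)( MC_(malloc), MC_(__builtin_new), ...,
                                     MC_(free), ..., MC_(realloc), ...,
                                     MC_(malloc_usable_size),
                                     MC_(Malloc_Redzone_SzB) );

   (see mc_main.c; the argument list here is deliberately abbreviated and
   may not match the current interface exactly). */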
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance to discover dangling pointer usage,
      even for big blocks being freed by the client. */
}
static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still
      allocated blocks, but we are in the middle of freeing this one.  To
      report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This guarantees that we catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
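
/* Illustrative example (not part of the original file): because
   MC_(realloc) always hands out fresh client memory and queues the old
   block (even when shrinking), a stale pointer into the old block is
   caught as long as the block is still in the freed queue.  Sketch of
   client code that Memcheck would flag:

      char* p = malloc(64);
      char* q = realloc(p, 32);   // under Memcheck the block always moves
      p[0] = 'x';                 // reported as a write to free'd memory
      free(q);
*/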
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}
/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
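
/* Illustrative sketch (not part of the original file): the client request
   that lands here, assuming valgrind/valgrind.h and an allocator that can
   grow a block without moving it (p and rz are placeholders):

      // Block previously announced with VALGRIND_MALLOCLIKE_BLOCK(p, 64, rz, 0)
      VALGRIND_RESIZEINPLACE_BLOCK(p, 64, 128, rz);

   oldSizeB must match the size Memcheck has on record for p, otherwise the
   request is rejected above with a free error. */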
/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk* mc;
   ThreadId  tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->auto_free  = auto_free;
   mp->metapool   = metapool;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mempool descriptor isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
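
/* Illustrative sketch (not part of the original file): the client-request
   sequence that exercises the mempool functions below, assuming the macros
   from valgrind/valgrind.h and a hypothetical pool_base address plus a
   hypothetical pool_alloc() helper:

      VALGRIND_CREATE_MEMPOOL(pool_base, 8, 0);        // rzB=8, not zeroed

      void* obj = pool_alloc(pool_base, 100);          // hypothetical
      VALGRIND_MEMPOOL_ALLOC(pool_base, obj, 100);     // -> MC_(mempool_alloc)

      VALGRIND_MEMPOOL_FREE(pool_base, obj);           // -> MC_(mempool_free)

      VALGRIND_DESTROY_MEMPOOL(pool_base);             // -> MC_(destroy_mempool)

   Auto-free meta pools are announced with VALGRIND_CREATE_MEMPOOL_EXT and
   the VALGRIND_MEMPOOL_AUTO_FREE / VALGRIND_MEMPOOL_METAPOOL flags; the
   panic above enforces that auto_free implies metapool. */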
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}
static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We still mark the redzones noaccess, however, to catch
         // some bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)((void*)chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)((void*)chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min );
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB  = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
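
/* Illustrative note (not part of the original file): VALGRIND_MEMPOOL_TRIM
   from valgrind/valgrind.h maps onto MC_(mempool_trim).  Sketch, using
   placeholder names pool_base / keep_lo / keep_len:

      VALGRIND_MEMPOOL_TRIM(pool_base, keep_lo, keep_len);

   Every chunk wholly inside [keep_lo, keep_lo+keep_len) survives untouched,
   chunks wholly outside are freed as if by VALGRIND_MEMPOOL_FREE, and chunks
   straddling a boundary are shrunk to their surviving part, with the dropped
   bytes marked noaccess. */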
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}
static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes  = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
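
/* Usage note (illustrative, not part of the original file): this report is
   produced when heap profiling is enabled, e.g.

      valgrind --tool=memcheck --xtree-memory=full \
               --xtree-memory-file=xtmemory.kcg ./a.out

   The iterator setup above (VG_(HT_ResetIter) before handing
   xtmemory_report_next_block to VG_(XTMemory_report)) walks every block
   still recorded in MC_(malloc_list). */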
/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/