/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16
/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/
SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB
/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;
/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;
/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            SizeT alignB, MC_AllocKind kind );
static inline
void delete_MC_Chunk (MC_Chunk* mc);
/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list
   - the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked in
   the big blocks freed list.
   This allows a client to allocate and free big blocks (e.g. bigger
   than VG_(clo_freelist_vol)) without immediately losing all
   protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2] = {NULL, NULL};
static MC_Chunk* freed_list_end[2]   = {NULL, NULL};
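
/* Illustrative sketch (not part of the original logic): the queue above is
   sized by the existing --freelist-vol and --freelist-big-blocks options,
   e.g.

      valgrind --tool=memcheck --freelist-vol=20000000 \
               --freelist-big-blocks=1000000 ./prog

   With these (hypothetical) values, freed blocks of 1MB or more go on the
   big blocks list (index [0]), smaller ones on the small blocks list
   (index [1]), and at most roughly 20MB of freed payload stays queued. */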
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released anyway: in this case, we
      put it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}
/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;

   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free the payload and the shadow chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}
/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            SizeT alignB, MC_AllocKind kind )
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->alignB    = alignB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallthrough to 1
      case 1: mc->where[0] = 0; // fallthrough to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}
static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}
// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}
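
/* Illustrative sketch of the double-entry case described above, using the
   client request from valgrind/valgrind.h (names are hypothetical):

      char* sb = malloc(65536);                   // superblock, tracked normally
      VALGRIND_MALLOCLIKE_BLOCK(sb, 4096, 0, 0);  // marks the same address again

   The address sb is now in MC_(malloc_list) twice: once as an MC_AllocMalloc
   chunk of 65536 bytes and once as an MC_AllocCustom chunk of 4096 bytes;
   the size/allockind comparison above distinguishes the two. */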
// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) even for an MC_AllocCustom
      block not found above, as such a block can also be inserted in
      MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}
ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}
ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}
void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
}
void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   Int pos;
   ExeContext* ec_free;

   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:
                               if (LIKELY(VG_(clo_xtree_memory)
                                          != Vg_XTMemory_Full))
                                  return;
                               pos = -1; break;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   /* We need the execontext for the free operation, either to store
      it in the mc chunk and/or for full xtree memory profiling.
      Note: we are guaranteed to find the ec_alloc in mc->where[0], as
      mc_post_clo_init verifies the consistency of --xtree-memory and
      --keep-stacktraces. */
   ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
   if (LIKELY(pos >= 0))
      mc->where[pos] = ec_free;
}
UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}
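
/* Sketch of how the where[] slots map onto --keep-stacktraces (the option
   values are the documented memcheck settings; this mapping just restates
   the switches above):

      --keep-stacktraces=none            -> no stack kept
      --keep-stacktraces=alloc           -> where[0] = allocation stack
      --keep-stacktraces=free            -> where[0] = free stack
      --keep-stacktraces=alloc-then-free -> where[0] = alloc stack,
                                            overwritten by the free stack
      --keep-stacktraces=alloc-and-free  -> where[0] = alloc, where[1] = free
*/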
/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/
/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       SizeT orig_alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, orig_alignB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
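
/* Illustrative sketch: the p != 0 path above is reached via the
   VALGRIND_MALLOCLIKE_BLOCK client request, where the client supplies the
   payload address itself (names below are hypothetical):

      void* payload = my_arena_alloc(&arena, 100);     // custom allocator
      VALGRIND_MALLOCLIKE_BLOCK(payload, 100, 16, 0);  // kind == MC_AllocCustom
      ...
      VALGRIND_FREELIKE_BLOCK(payload, 16);

   The p == 0 path is the ordinary malloc/new family, where VG_(cli_malloc)
   obtains the payload on the client's behalf. */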
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new_aligned) ( ThreadId tid, SizeT n, SizeT alignB, SizeT orig_alignB )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new_aligned", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT orig_alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB, orig_alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment), 0U,
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance to discover dangling pointer usage,
      even for big blocks being freed by the client. */
}
static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still-allocated
      blocks, but we are in the middle of freeing this one.  To report
      the error correctly, we re-insert the chunk (making it again a
      "clean" allocated block), report the error, and then re-remove
      the chunk.  This avoids doing a VG_(HT_lookup) followed by a
      VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}
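
/* Illustrative sketch of the mismatch check above: deallocating with the
   wrong function family changes `kind` but not mc->allockind.  In a C++
   client, for example:

      int* x = new int[10];   // recorded with MC_AllocNewVec
      free(x);                // arrives here with kind == MC_AllocMalloc

   This triggers record_freemismatch_error(); the block is then freed
   normally via die_and_free_mem().  Reporting can be silenced with
   --show-mismatched-frees=no. */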
void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}

void MC_(__builtin_vec_delete_aligned) ( ThreadId tid, void* p, SizeT alignB )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   if (p_old == NULL) {
      return MC_(new_block) ( tid, 0, new_szB, VG_(clo_alignment), 0U,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }

   if (new_szB == 0U) {
      if (MC_(clo_show_realloc_size_zero)) {
         MC_(record_realloc_size_zero)(tid, (Addr)p_old);
      }

      if (VG_(clo_realloc_zero_bytes_frees) == True) {
         MC_(handle_free)(
            tid, (Addr)p_old, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );

         return NULL;
      }
      new_szB = 1U;
   }

   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This guarantees that we catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      // Re-allocation does not conserve alignment.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, 0U, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
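
/* Illustrative note on the policy above: because realloc always moves the
   payload to freshly cli_malloc'd memory, a stale pointer held across the
   call is detectable, e.g. in (hypothetical) client code:

      char* p = malloc(64);
      char* q = realloc(p, 32);   // new address, even though it shrank
      p[0] = 'x';                 // reported as access to a freed block

   The old block then sits on the freed queue until the volume limit pushes
   it out, as described for add_to_freed_queue() above. */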
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}
/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
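
/* Illustrative sketch of the client request that drives this function
   (macros from valgrind/valgrind.h; names and sizes are hypothetical):

      void* p = my_arena_alloc(&arena, 1024);
      VALGRIND_MALLOCLIKE_BLOCK(p, 1024, 0, 0);
      // later, shrink the block in place to 100 bytes:
      VALGRIND_RESIZEINPLACE_BLOCK(p, 1024, 100, 0);
      // bytes [100, 1024) are now marked noaccess

   Shrinking marks the dropped tail noaccess; growing marks the new tail
   undefined, as implemented above. */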
/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0
static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk* mc;
   ThreadId  tid;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (mc->data >= StartAddr && mc->data + mc->szB <= EndAddr) {
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                         mc->data, (mc->szB + 0UL));
         }

         VG_(HT_remove_at_Iter)(MC_(malloc_list));
         die_and_free_mem(tid, mc, mp->rzB);
      }
   }
}
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d,"
                   " autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed,
                   auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
      if (auto_free && !metapool)
         VG_(tool_panic)("Inappropriate use of mempool:"
                         " an auto free pool must be a meta pool. Aborting\n");
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->auto_free = auto_free;
   mp->metapool  = metapool;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
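
/* Illustrative sketch: this function is driven by the mempool client
   requests (VALGRIND_CREATE_MEMPOOL* from valgrind/valgrind.h,
   VALGRIND_MAKE_MEM_NOACCESS from valgrind/memcheck.h).  A minimal,
   hypothetical client:

      static char superblock[10000];
      VALGRIND_MAKE_MEM_NOACCESS(superblock, sizeof superblock);
      VALGRIND_CREATE_MEMPOOL(superblock, 8, 0);   // rzB=8, not zeroed
      ...
      VALGRIND_DESTROY_MEMPOOL(superblock);

   The auto_free/metapool flags correspond to VALGRIND_CREATE_MEMPOOL_EXT
   with VALGRIND_MEMPOOL_AUTO_FREE and VALGRIND_MEMPOOL_METAPOOL. */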
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}
static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %u pools, %u chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%u chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %u / %u: %lu bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0U, 0U, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We however still mark the redzones noaccess, to catch some
         // bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
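
/* Illustrative sketch (hypothetical client code, continuing the
   create_mempool example above):

      char* obj = superblock + 100;                 // carve out of the pool
      VALGRIND_MEMPOOL_ALLOC(superblock, obj, 32);  // obj now addressable
      ...
      VALGRIND_MEMPOOL_FREE(superblock, obj);       // handled below

   Each MEMPOOL_ALLOC arrives here as an MC_AllocCustom chunk inserted into
   mp->chunks, with rzB-sized red zones around it marked noaccess. */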
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)((void*)chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)((void*)chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min );
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
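
/* Illustrative sketch of VALGRIND_MEMPOOL_TRIM, which drives this function
   (hypothetical extent):

      // Declare that only [superblock, superblock+5000) of the pool survives.
      VALGRIND_MEMPOOL_TRIM(superblock, superblock, 5000);

   Chunks entirely inside the extent are kept, chunks entirely outside are
   freed, and chunks straddling a boundary are cut down to their surviving
   part, exactly as the three branches above do. */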
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}
static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
   if (mc) {
      xta->nbytes = mc->szB;
      xta->nblocks = 1;
      *ec_alloc = MC_(allocated_at)(mc);
   } else
      xta->nblocks = 0;
}

void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(MC_(malloc_list));

   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
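
/* Illustrative note: this report is produced when memcheck runs with full
   xtree memory profiling, e.g. (output file name is hypothetical):

      valgrind --tool=memcheck --xtree-memory=full \
               --xtree-memory-file=xtmemory.out ./prog

   Each block still live in MC_(malloc_list) is then reported with its size
   and allocation stack via xtmemory_report_next_block above. */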
/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/