/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2013 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16

/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;

static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind );
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   . the "big blocks" freed list
   . the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked
   in the big blocks freed list.
   This allows a client to allocate and free big blocks (e.g. bigger
   than VG_(clo_freelist_vol)) without immediately losing all
   protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released anyway: in this case, we
      put it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below vg_clo_freelist_vol.
   Start with big block list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;

   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* Free the client block (unless it is a custom allocation,
            which we never allocated ourselves), then the shadow chunk. */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

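/* Return the shadow chunk on the freed queue whose block, extended by
   MC_(Malloc_Redzone_SzB) bytes of redzone on each side, contains the
   address a; return NULL if no freed block brackets a. */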
MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind )
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fallback to 1
      case 1: mc->where[0] = 0; // fallback to 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will be twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) even for an MC_AllocCustom
      block that was not found above, as such a block can be inserted
      in MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}

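/* Accessors for the execontext(s) stored in mc->where[].  They honour
   the --keep-stacktraces setting: the real context is returned when it
   was recorded, and the null context otherwise. */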
ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   UInt pos;
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           return;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

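/* The MC_(malloc)/MC_(free)-family functions below are the wrappers
   Memcheck installs in place of the client's allocator (they are
   registered via VG_(needs_malloc_replacement) in mc_main.c).
   MC_(new_block) and MC_(handle_free) also back the MALLOCLIKE_BLOCK /
   FREELIKE_BLOCK client requests and the mempool code further down. */
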
/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

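/* Common freeing path: optionally free-fill the payload, make the block
   and its redzones noaccess, record where it was freed, and park the
   shadow chunk on the freed queue instead of deleting it right away. */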
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}

static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for blocks that are
      still allocated, but we are in the middle of freeing this one.  To
      report the error correctly, we re-insert the chunk (making it a
      "clean allocated block" again), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup) followed
      by a VG_(HT_remove) in all "non-erroneous" cases. */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew );
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec );
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue.) */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}

/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

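/* For orientation: the entry points below back Memcheck's mempool
   client requests.  A rough sketch of the client-side usage (request
   macros as defined in memcheck.h) is:

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed);
      VALGRIND_MEMPOOL_ALLOC(pool, addr, size);
      VALGRIND_MEMPOOL_FREE(pool, addr);
      VALGRIND_MEMPOOL_TRIM(pool, addr, size);
      VALGRIND_DESTROY_MEMPOOL(pool);

   Each request is routed (via the client-request handler in mc_main.c)
   to the corresponding MC_(create_mempool), MC_(mempool_alloc),
   MC_(mempool_free), MC_(mempool_trim) and MC_(destroy_mempool)
   functions below. */
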
static void check_mempool_sane(MC_Mempool* mp); /*forward*/

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

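/* Comparison callback for VG_(ssort): orders chunk pointers by the
   start address of the chunk they refer to. */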
static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

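/* Sanity-check a mempool: sort its chunks by address and report any
   chunk that is out of order with, or overlaps, its successor, dumping
   the chunks when something looks wrong.  At higher verbosity it also
   periodically prints the total number of active pools and chunks. */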
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk** chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We however still mark the redzones noaccess to still catch
         // some bugs if the user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}

void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         if (mc->data + szB > addr + szB) {
            max = mc->data + szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

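/* True if a mempool anchored at 'pool' is currently registered. */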
Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}

/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

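/* Accessor for the running count of client frees (cmalloc_n_frees). */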
SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/