/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_threadstate.h" // For VG_INVALID_THREADID
#include "pub_core_syscall.h"     // For VG_(strerror)
#include "pub_core_gdbserver.h"
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h"

#include "pub_core_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "memcheck/memcheck.h"
#endif

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery
/* Number and total size of blocks in free queue. Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */
/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50
/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB) are
   related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
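
/* Illustrative sketch (not from the original source): a standalone,
   compiled-out example of the bszB/pszB arithmetic described above.  The
   numbers assume a 64-bit platform (sizeof(SizeT) == 8), an 8-byte redzone
   and no heap profiling; these are assumptions for the example only. */
#if 0
#include <assert.h>
#include <stddef.h>
int main(void)
{
   size_t rz_szB = 8;                          /* assumed redzone size */
   size_t pszB   = 100;                        /* payload size */
   size_t bszB   = pszB + 2*sizeof(size_t) + 2*rz_szB;
   /* 100 + 16 + 16 == 132 bytes, before rounding the payload up to
      VG_MIN_MALLOC_SZB and before any heap-profiling cost-center field. */
   assert(bszB == 132);
   return 0;
}
#endif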
// No fields are actually used in this struct, because a Block has
// many variable sized fields and so can't be accessed
// meaningfully with normal fields.  So we use access functions all
// the time.  This struct gives us a type to use, though.  Also, we
// make sizeof(Block) 1 byte so that we can do arithmetic with the
// Block* type in increments of 1!
typedef
   struct {
      UByte dummy;
   }
   Block;

/* Ensure that Block payloads can be safely cast to various pointers below. */
STATIC_ASSERT(VG_MIN_MALLOC_SZB % sizeof(void *) == 0);
// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// 'unsplittable' is set to NULL if superblock can be split, otherwise
// it is set to the address of the superblock. An unsplittable superblock
// will contain only one allocated block. An unsplittable superblock will
// be unmapped when its (only) allocated block is freed.
// The free space at the end of an unsplittable superblock is not used to
// make a free block. Note that this means that an unsplittable superblock can
// have up to slightly less than 1 page of unused bytes at the end of the
// superblock.
// 'unsplittable' is used to avoid quadratic memory usage for linear
// reallocation of big structures
// (see http://bugs.kde.org/show_bug.cgi?id=250101).
// ??? unsplittable replaces 'void *padding2'. Chose this
// ??? to avoid changing the alignment logic. Maybe something cleaner
// ??? can be done.
// A splittable block can be reclaimed when all its blocks are freed:
// the reclaim of such a block is deferred till either another superblock
// of the same arena can be reclaimed or till a new superblock is needed
// in any arena.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end.
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      struct _Superblock* unsplittable;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
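
/* Illustrative sketch (not from the original source): how the 'padding'
   size is computed so that payload_bytes[] stays VG_MIN_MALLOC_SZB-aligned.
   The numbers assume a 32-bit platform (4-byte SizeT and pointers) with an
   8-byte VG_MIN_MALLOC_SZB; these are assumptions for the example only. */
#if 0
#include <assert.h>
#include <stddef.h>
int main(void)
{
   size_t min_malloc_szb = 8;                  /* assumed VG_MIN_MALLOC_SZB */
   size_t hdr            = 4 /* SizeT */ + 4 /* Superblock* */;
   size_t padding        = min_malloc_szb - (hdr % min_malloc_szb);
   /* hdr is already a multiple of 8, so 8 bytes of padding get added
      "unnecessarily", exactly as the comment above warns. */
   assert(padding == 8);
   return 0;
}
#endif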
// An arena. 'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      const HChar* name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      SizeT        min_unsplittable_sblock_szB;
      // Minimum unsplittable superblock size in bytes. To be marked as
      // unsplittable, a superblock must have a
      // size >= min_unsplittable_sblock_szB and cannot be split.
      // So, to avoid big overhead, superblocks used to provide aligned
      // blocks on big alignments are splittable.
      // Unsplittable superblocks will be reclaimed when their (only)
      // allocated block is freed.
      // Smaller size superblocks are splittable and can be reclaimed when all
      // their blocks are freed.
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      Superblock*  deferred_reclaimed_sb;

      // VG_(arena_perm_malloc) returns memory from superblocks
      // only used for permanent blocks. No overhead. These superblocks
      // are not stored in sblocks array above.
      Addr         perm_malloc_current; // first byte free in perm_malloc sb.
      Addr         perm_malloc_limit;   // maximum usable byte in perm_malloc sb.

      SizeT        stats__perm_bytes_on_loan;
      SizeT        stats__perm_blocks;

      ULong        stats__nreclaim_unsplit;
      ULong        stats__nreclaim_split;
      /* total # of reclaims executed for unsplittable/splittable superblocks */
      SizeT        stats__bytes_on_loan;
      SizeT        stats__bytes_mmaped;
      SizeT        stats__bytes_on_loan_max;
      ULong        stats__tot_blocks;  /* total # blocks alloc'd */
      ULong        stats__tot_bytes;   /* total # bytes alloc'd */
      ULong        stats__nsearches;   /* total # freelist checks */
      // If profiling, when should the next profile happen
      // (in terms of stats__bytes_on_loan_max)?
      SizeT        next_profile_at;
      SizeT        stats__bytes_mmaped_max;
   }
   Arena;
/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1      ((SizeT)0x1)

static const char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";
// Mark a bszB as in-use, mark it as not in-use, or remove the in-use
// attribute entirely.
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
// Forward definition.
void ensure_mm_init ( ArenaId aid );

// return either 0 or sizeof(ULong) depending on whether or not
// heap profiling is engaged
#define hp_overhead_szB() set_at_init_hp_overhead_szB
static SizeT set_at_init_hp_overhead_szB = -1000000;
// startup value chosen to very likely cause a problem if used before
// a proper value is given by ensure_mm_init.
//---------------------------------------------------------------------------

// Get a block's size as stored, ie with the in-use/free attribute.
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2 = (UByte*)b;
   SizeT bszB_lo = *ASSUME_ALIGNED(SizeT*, &b2[0 + hp_overhead_szB()]);
   SizeT bszB_hi = *ASSUME_ALIGNED(SizeT*,
                      &b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)]);
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}
// Get a block's plain size, ie. remove the in-use/free attribute.
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(SizeT*, &b2[0 + hp_overhead_szB()]) = bszB;
   *ASSUME_ALIGNED(SizeT*, &b2[mk_plain_bszB(bszB) - sizeof(SizeT)]) = bszB;
}
//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}
//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}

//---------------------------------------------------------------------------

// Get a block's payload size.
SizeT get_pszB ( Arena* a, Block* b )
{
   return bszB_to_pszB(a, get_bszB(b));
}
//---------------------------------------------------------------------------

// Given the addr of a block, return the addr of its payload, and vice versa.
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}
//---------------------------------------------------------------------------

// Set and get the next and previous link fields of a block.
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(Block**, &b2[hp_overhead_szB() + sizeof(SizeT)]) = prev_p;
}
void set_next_b ( Block* b, Block* next_p )
{
   UByte* b2 = (UByte*)b;
   *ASSUME_ALIGNED(Block**,
                   &b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)]) = next_p;
}
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *ASSUME_ALIGNED(Block**, &b2[hp_overhead_szB() + sizeof(SizeT)]);
}
Block* get_next_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *ASSUME_ALIGNED(Block**,
                          &b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)]);
}
//---------------------------------------------------------------------------

// Set and get the cost-center field of a block.
void set_cc ( Block* b, const HChar* cc )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   *ASSUME_ALIGNED(const HChar**, &b2[0]) = cc;
}
const HChar* get_cc ( Block* b )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   return *ASSUME_ALIGNED(const HChar**, &b2[0]);
}
//---------------------------------------------------------------------------

// Get the block immediately preceding this one in the Superblock.
Block* get_predecessor_block ( Block* b )
{
   UByte* b2   = (UByte*)b;
   SizeT  bszB = mk_plain_bszB(*ASSUME_ALIGNED(SizeT*, &b2[-sizeof(SizeT)]));
   return (Block*)&b2[-bszB];
}
//---------------------------------------------------------------------------

// Read and write the lower and upper red-zone bytes of a block.
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}
#if defined(ENABLE_INNER_CLIENT_REQUEST)
/* When running as an inner, the block headers before and after
   (see 'Layout of an in-use block:' above) are made non-accessible
   by VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK
   to allow the outer to detect block overrun.
   The below two functions are used when these headers must be
   temporarily accessed. */
static void mkBhdrAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT) + a->rz_szB);
   VALGRIND_MAKE_MEM_DEFINED (b + get_bszB(b) - a->rz_szB - sizeof(SizeT),
                              a->rz_szB + sizeof(SizeT));
}

/* Mark block hdr as not accessible.
   !!! Currently, we do not mark the cost center and szB fields inaccessible
   as these are accessed at too many places. */
static void mkBhdrNoAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_NOACCESS (b + hp_overhead_szB() + sizeof(SizeT),
                               a->rz_szB);
   VALGRIND_MAKE_MEM_NOACCESS (b + get_bszB(b) - sizeof(SizeT) - a->rz_szB,
                               a->rz_szB);
}

/* Make the cc+szB fields accessible. */
static void mkBhdrSzAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT));
   /* We cannot use get_bszB(b), as this reads the 'hi' szB we want
      to mark accessible. So, we only access the 'lo' szB. */
   SizeT bszB_lo = mk_plain_bszB(*(SizeT*)&b[0 + hp_overhead_szB()]);
   VALGRIND_MAKE_MEM_DEFINED (b + bszB_lo - sizeof(SizeT),
                              sizeof(SizeT));
}
#endif
/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];
// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

static ArenaId arenaP_to_ArenaId ( Arena *a )
{
   ArenaId arena = a - vg_arena;
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return arena;
}
// Initialise an arena.  rz_szB is the (default) minimum redzone size;
// it might be overridden by VG_(clo_redzone_size) or VG_(clo_core_redzone_size),
// and it might be made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
void arena_init ( ArenaId aid, const HChar* name, SizeT rz_szB,
                  SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure default redzones are a reasonable size.
   vg_assert(rz_szB <= MAX_REDZONE_SZB);

   /* Override the default redzone size if a clo value was given.
      Note that the clo value can be significantly bigger than MAX_REDZONE_SZB
      to allow the user to chase horrible bugs using up to 1 page
      of redzone. */
   if (VG_AR_CLIENT == aid) {
      if (VG_(clo_redzone_size) != -1)
         rz_szB = VG_(clo_redzone_size);
   } else {
      if (VG_(clo_core_redzone_size) != rz_szB)
         rz_szB = VG_(clo_core_redzone_size);
   }

   // Redzones must always be at least the size of a pointer, for holding the
   // prev/next pointer (see the layout details at the top of this file).
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   // Here we have established the effective redzone size.

   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   a->min_sblock_szB = min_sblock_szB;
   a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks                  = & a->sblocks_initial[0];
   a->sblocks_size             = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used             = 0;
   a->deferred_reclaimed_sb    = 0;
   a->perm_malloc_current      = 0;
   a->perm_malloc_limit        = 0;
   a->stats__perm_bytes_on_loan= 0;
   a->stats__perm_blocks       = 0;
   a->stats__nreclaim_unsplit  = 0;
   a->stats__nreclaim_split    = 0;
   a->stats__bytes_on_loan     = 0;
   a->stats__bytes_mmaped      = 0;
   a->stats__bytes_on_loan_max = 0;
   a->stats__bytes_mmaped_max  = 0;
   a->stats__tot_blocks        = 0;
   a->stats__tot_bytes         = 0;
   a->stats__nsearches         = 0;
   a->next_profile_at          = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}
/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%-8s: %'13lu/%'13lu max/curr mmap'd, "
                   "%llu/%llu unsplit/split sb unmmap'd, "
                   "%'13lu/%'13lu max/curr, "
                   "%10llu/%10llu totalloc-blocks/bytes,"
                   " %10llu searches %lu rzB\n",
                   a->name,
                   a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
                   a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
                   a->stats__bytes_on_loan_max,
                   a->stats__bytes_on_loan,
                   a->stats__tot_blocks, a->stats__tot_bytes,
                   a->stats__nsearches,
                   a->rz_szB
      );
   }
}
void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}
/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size. */
static Bool     client_inited = False;
static Bool  nonclient_inited = False;
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid

   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr.
   */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;
      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         if (client_rz_szB > MAX_REDZONE_SZB) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
            VG_(exit)(1);
         }
      }
      // Initialise the client arena.  On all platforms,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
      ar_client_sbszB = 4194304;
      // superblocks with a size > ar_client_sbszB will be unsplittable
      // (unless used for providing memalign-ed blocks).
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,
                   ar_client_sbszB, ar_client_sbszB+1);
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      set_at_init_hp_overhead_szB =
         VG_(clo_profile_heap)  ? VG_MIN_MALLOC_SZB  : 0;
      // Initialise the non-client arenas
      // Similarly to client arena, big allocations will be unsplittable.
      arena_init ( VG_AR_CORE,      "core",     CORE_REDZONE_DEFAULT_SZB,
                   4194304, 4194304+1 );
      arena_init ( VG_AR_DINFO,     "dinfo",    CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      arena_init ( VG_AR_TTAUX,     "ttaux",    CORE_REDZONE_DEFAULT_SZB,
                   65536,   65536+1 );
      nonclient_inited = True;
   }
#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}
/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB, UWord err )
{
   static Int outputTrial = 0;
   // We try once to output the full memory state followed by the below message.
   // If that fails (due to out of memory during first trial), we try to just
   // output the below message.
   // And then we abandon.

   ULong tot_alloc = VG_(am_get_anonsize_total)();
   const HChar* s1 =
      "\n"
      "    Valgrind's memory management: out of memory: %s\n"
      "       %s's request for %llu bytes failed.\n"
      "       %'13llu bytes have already been mmap-ed ANONYMOUS.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - You have some policy enabled that prevents memory from being\n"
      "      executable (for example selinux deny_execmem), causing\n"
      "      mmap to fail with Permission denied.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB of memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";
   if (outputTrial <= 1) {
      if (outputTrial == 0) {
         outputTrial++;
         // First print the memory stats with the aspacemgr data.
         VG_(am_show_nsegments) (0, "out_of_memory");
         VG_(print_all_arena_stats) ();
         if (VG_(clo_profile_heap))
            VG_(print_arena_cc_analysis) ();
         // And then print some other information that might help.
         VG_(print_all_stats) (False, /* Memory stats */
                               True /* Tool stats */);
         VG_(show_sched_status) (True,  // host_stacktrace
                                 True,  // valgrind_stack_usage
                                 True); // exited_threads
         /* In case we are an inner valgrind, ask the outer to report
            its memory state in its log output. */
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.set log_output"));
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.info memory aspacemgr"));
      }
      outputTrial++;
      VG_(message)(Vg_UserMsg, s1,
                   ((err == 0 || err == VKI_ENOMEM)
                    ? "" : VG_(strerror) (err)),
                   who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1,
                    ((err == 0 || err == VKI_ENOMEM)
                     ? "" : VG_(strerror) (err)),
                    who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}
// Align ptr p upwards to an align-sized boundary.
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
// Forward definition.
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb );
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   Bool        unsplittable;
   ArenaId     aid;

   // A new superblock is needed for arena a. We will execute the deferred
   // reclaim in all arenas in order to minimise fragmentation and
   // peak memory usage.
   for (aid = 0; aid < VG_N_ARENAS; aid++) {
      Arena* arena = arenaId_to_ArenaP(aid);
      if (arena->deferred_reclaimed_sb != NULL)
         deferred_reclaimSuperblock (arena, NULL);
   }

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (cszB >= a->min_unsplittable_sblock_szB)
      unsplittable = True;
   else
      unsplittable = False;

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_mmap_client_heap)
                ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(Addr)sr_Res(sres);
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB, sr_Err(sres));
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      }
      sb = (Superblock*)(Addr)sr_Res(sres);
   }
   vg_assert(NULL != sb);
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   sb->unsplittable = (unsplittable ? sb : NULL);
   a->stats__bytes_mmaped += cszB;
   if (a->stats__bytes_mmaped > a->stats__bytes_mmaped_max)
      a->stats__bytes_mmaped_max = a->stats__bytes_mmaped;
   VG_(debugLog)(1, "mallocfree",
                 "newSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                 sb, sb->n_payload_bytes,
                 (unsplittable ? "unsplittable" : ""),
                 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}
// Reclaims the given superblock:
//  * removes sb from the arena sblocks list.
//  * munmaps the superblock segment.
void reclaimSuperblock ( Arena* a, Superblock* sb)
{
   SysRes sres;
   SizeT  cszB;
   SizeT  i, j;

   VG_(debugLog)(1, "mallocfree",
                 "reclaimSuperblock at %p (pszB %7lu) %s owner %s/%s\n",
                 sb, sb->n_payload_bytes,
                 (sb->unsplittable ? "unsplittable" : ""),
                 a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   // Take into account admin bytes in the Superblock.
   cszB = sizeof(Superblock) + sb->n_payload_bytes;

   // removes sb from superblock list.
   for (i = 0U; i < a->sblocks_used; i++) {
      if (a->sblocks[i] == sb)
         break;
   }
   vg_assert(i < a->sblocks_used);
   for (j = i; j < a->sblocks_used; j++)
      a->sblocks[j] = a->sblocks[j+1];
   a->sblocks_used--;
   a->sblocks[a->sblocks_used] = NULL;
   // paranoia: NULLify ptr to reclaimed sb or NULLify copy of ptr to last sb.

   a->stats__bytes_mmaped -= cszB;
   if (sb->unsplittable)
      a->stats__nreclaim_unsplit++;
   else
      a->stats__nreclaim_split++;

   // Now that the sb is removed from the list, munmap its space.
   if (a->clientmem) {
      // reclaimable client allocation
      Bool need_discard = False;
      sres = VG_(am_munmap_client)(&need_discard, (Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock client munmap failure\n");
      /* We somewhat help the client by discarding the range.
         Note however that if the client has JITted some code in
         a small block that was freed, we do not provide this help. */
      /* JRS 2011-Sept-26: it would be nice to move the discard
         outwards somewhat (in terms of calls) so as to make it easier
         to verify that there will be no nonterminating recursive set
         of calls as a result of calling VG_(discard_translations).
         Another day, perhaps. */
      if (need_discard)
         VG_(discard_translations) ((Addr) sb, cszB, "reclaimSuperblock");
   } else {
      // reclaimable non-client allocation
      sres = VG_(am_munmap_valgrind)((Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock valgrind munmap failure\n");
   }
}
// Find the superblock containing the given chunk.
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock* sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
               b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}
// Find the superblock containing the given address.
// If superblock not found, return NULL.
Superblock* maybe_findSb ( Arena* a, Addr ad )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;
   Superblock* sb;

   while (min <= max) {
      SizeT pos = min + (max - min)/2;
      if (pos >= a->sblocks_used)
         return NULL;
      sb = a->sblocks[pos];
      if ((Addr)&sb->payload_bytes[0] <= ad
          && ad < (Addr)&sb->payload_bytes[sb->n_payload_bytes]) {
         return sb;
      } else if ((Addr)&sb->payload_bytes[0] <= ad) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   return NULL;
}
/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

#if defined(__clang__)
/* The nicely aligned 'returns' in the function below produce
 * misleading indentation warnings. Rather than turn the
 * warning off globally, just turn it off for the block of code. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmisleading-indentation"
#endif
// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static __attribute__((noinline))
UInt pszB_to_listNo_SLOW ( SizeT pszB__divided_by__VG_MIN_MALLOC_SZB )
{
   SizeT n = pszB__divided_by__VG_MIN_MALLOC_SZB;

   /* Leaves of a hand-built decision tree over n; the outer range checks
      that select between the groups below are not shown here. */

   /* -- Exponential slope up, factor 1.05 -- */
   if (n < 67) return 64;
   if (n < 70) return 65;
   /* else */  return 66;

   if (n < 77) return 67;
   if (n < 81) return 68;
   /* else */  return 69;

   if (n < 90) return 70;
   if (n < 94) return 71;
   /* else */  return 72;

   if (n < 104) return 73;
   if (n < 109) return 74;
   /* else */   return 75;

   if (n < 120) return 76;
   if (n < 126) return 77;
   /* else */   return 78;

   if (n < 139) return 79;
   /* -- Exponential slope up, factor 1.10 -- */
   if (n < 153) return 80;
   /* else */   return 81;

   if (n < 185) return 82;
   if (n < 204) return 83;
   /* else */   return 84;

   if (n < 247) return 85;
   if (n < 272) return 86;
   /* else */   return 87;

   if (n < 329) return 88;
   if (n < 362) return 89;
   /* else */   return 90;

   if (n < 438) return 91;
   if (n < 482) return 92;
   /* else */   return 93;

   if (n < 583) return 94;
   if (n < 641) return 95;
   /* -- Exponential slope up, factor 1.20 -- */
   /* else */   return 96;

   if (n < 924)  return 97;
   if (n < 1109) return 98;
   /* else */    return 99;

   if (n < 1597) return 100;
   if (n < 1916) return 101;

   if (n < 2760) return 103;
   if (n < 3312) return 104;
   /* else */    return 105;

   if (n < 4769) return 106;
   if (n < 5723) return 107;
   /* else */    return 108;

   if (n < 8241) return 109;
   if (n < 9890) return 110;
   /* else */    return 111;
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == (pszB % VG_MIN_MALLOC_SZB));

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks and are dealt with by the _SLOW case.
   if (LIKELY(n < 64)) {
      return (UInt)n;
   } else {
      return pszB_to_listNo_SLOW(n);
   }
}
// What is the minimum payload size for a given list?
SizeT listNo_to_pszB_min ( UInt listNo )
{
   /* Repeatedly computing this function at every request is
      expensive.  Hence at the first call just cache the result for
      every possible argument. */
   static SizeT cache[N_MALLOC_LISTS];
   static Bool  cache_valid = False;
   if (!cache_valid) {
      UInt  i;
      SizeT pszB = 0;
      for (i = 0; i < N_MALLOC_LISTS; i++) {
         while (pszB_to_listNo(pszB) < i)
            pszB += VG_MIN_MALLOC_SZB;
         cache[i] = pszB;
      }
      cache_valid = True;
   }
   /* Return the cached answer. */
   vg_assert(listNo <= N_MALLOC_LISTS);
   return cache[listNo];
}
// What is the maximum payload size for a given list?
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= N_MALLOC_LISTS);
   if (listNo == N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}
/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;

   // This loop bound was 20 for a long time, but experiments showed that
   // reducing it to 10 gave the same result in all the tests, and 5 got the
   // same result in 85--100% of cases.  And it's called often enough to be
   // noticeable in programs that allocate a lot.
   for (i = 0; i < 5; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
#     endif
      a->freelist[lno] = p_best;
   }
}
/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define REDZONE_LO_MASK    0x31
#define REDZONE_HI_MASK    0x7c
// Do some crude sanity checks on a Block.
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The lo and hi size fields will be checked (indirectly) by the call
   // to get_rz_hi_byte().
   if (!a->clientmem && is_inuse_block(b)) {
      // In the inner, for memcheck's sake, temporarily mark redzone accessible.
      INNER_REQUEST(mkBhdrAccess(a,b));
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
      INNER_REQUEST(mkBhdrNoAccess(a,b));
   }
   return True;
#  undef BLEAT
}
1268 Bool
unsplittableBlockSane ( Arena
* a
, Superblock
*sb
, Block
* b
)
1270 # define BLEAT(str) VG_(printf)("unsplittableBlockSane: fail -- %s\n",str)
1275 if (!blockSane (a
, b
))
1276 {BLEAT("blockSane");return False
;}
1278 if (sb
->unsplittable
!= sb
)
1279 {BLEAT("unsplittable");return False
;}
1281 sb_start
= &sb
->payload_bytes
[0];
1282 sb_end
= &sb
->payload_bytes
[sb
->n_payload_bytes
- 1];
1284 // b must be first block (i.e. no unused bytes at the beginning)
1285 if ((Block
*)sb_start
!= b
)
1286 {BLEAT("sb_start");return False
;}
1288 // b must be last block (i.e. no unused bytes at the end)
1289 other_b
= b
+ get_bszB(b
);
1290 if (other_b
-1 != (Block
*)sb_end
)
1291 {BLEAT("sb_end");return False
;}
1297 // Print superblocks (only for debugging).
1299 void ppSuperblocks ( Arena
* a
)
1301 UInt i
, j
, blockno
= 1;
1304 for (j
= 0; j
< a
->sblocks_used
; ++j
) {
1305 Superblock
* sb
= a
->sblocks
[j
];
1307 VG_(printf
)( "\n" );
1308 VG_(printf
)( "superblock %u at %p %s, sb->n_pl_bs = %lu\n",
1309 blockno
++, sb
, (sb
->unsplittable
? "unsplittable" : ""),
1310 sb
->n_payload_bytes
);
1311 for (i
= 0; i
< sb
->n_payload_bytes
; i
+= b_bszB
) {
1312 Block
* b
= (Block
*)&sb
->payload_bytes
[i
];
1313 b_bszB
= get_bszB(b
);
1314 VG_(printf
)( " block at %u, bszB %lu: ", i
, b_bszB
);
1315 VG_(printf
)( "%s, ", is_inuse_block(b
) ? "inuse" : "free");
1316 VG_(printf
)( "%s\n", blockSane(a
, b
) ? "ok" : "BAD" );
1318 vg_assert(i
== sb
->n_payload_bytes
); // no overshoot at end of Sb
1320 VG_(printf
)( "end of superblocks\n\n" );
// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool        thisFree, lastWasFree, sblockarrOK;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %u "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   arena_bytes_on_loan += a->stats__perm_bytes_on_loan;

   if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %lu, "
                   "arena_bytes_on_loan %lu: "
                   "MISMATCH\n", a->stats__bytes_on_loan, arena_bytes_on_loan);
#     endif
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %u at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %u at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%-8s: %2u sbs, %5u bs, %2u/%-2u free bs, "
                   "%7lu mmap, %7lu loan\n",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->stats__bytes_mmaped, a->stats__bytes_on_loan);
#  undef BOMB
}
#define N_AN_CCS 1000

typedef
   struct {
      ULong nBytes;
      ULong nBlocks;
      const HChar* cc;
   }
   AnCC;

static AnCC anCCs[N_AN_CCS];

/* Sorting by decreasing cost center nBytes, to have the biggest
   cost centres at the top. */
static Int cmp_AnCC_by_vol ( const void* v1, const void* v2 ) {
   const AnCC* ancc1 = v1;
   const AnCC* ancc2 = v2;
   if (ancc1->nBytes < ancc2->nBytes) return 1;
   if (ancc1->nBytes > ancc2->nBytes) return -1;
   return 0;
}
static void cc_analyse_alloc_arena ( ArenaId aid )
{
   Word        i, j, k;
   Arena*      a;
   Block*      b;
   Bool        thisFree, lastWasFree;
   SizeT       b_bszB;

   const HChar* cc;
   UInt n_ccs = 0;

   a = arenaId_to_ArenaP(aid);
   if (a->name == NULL) {
      /* arena is not in use, is not initialised and will fail the
         sanity check that follows. */
      return;
   }

   sanity_check_malloc_arena(aid);

   VG_(printf)(
      "-------- Arena \"%s\": %'lu/%'lu max/curr mmap'd, "
      "%llu/%llu unsplit/split sb unmmap'd, "
      "%'lu/%'lu max/curr on_loan %lu rzB --------\n",
      a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
      a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
      a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
      a->rz_szB
   );

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
         }
         lastWasFree = thisFree;

         if (thisFree) continue;

         if (VG_(clo_profile_heap))
            cc = get_cc(b);
         else
            cc = "(--profile-heap=yes for details)";
         VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
                     (Int)(!thisFree),
                     (Int)bszB_to_pszB(a, b_bszB),
                     cc);

         for (k = 0; k < n_ccs; k++) {
            vg_assert(anCCs[k].cc);
            if (0 == VG_(strcmp)(cc, anCCs[k].cc))
               break;
         }
         vg_assert(k >= 0 && k <= n_ccs);

         if (k == n_ccs) {
            vg_assert(n_ccs < N_AN_CCS-1);
            n_ccs++;
            anCCs[k].nBytes  = 0;
            anCCs[k].nBlocks = 0;
            anCCs[k].cc      = cc;
         }

         vg_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
         anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
         anCCs[k].nBlocks++;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
      }
   }

   if (a->stats__perm_bytes_on_loan > 0) {
      vg_assert(n_ccs < N_AN_CCS-1);
      anCCs[n_ccs].nBytes  = a->stats__perm_bytes_on_loan;
      anCCs[n_ccs].nBlocks = a->stats__perm_blocks;
      anCCs[n_ccs].cc      = "perm_malloc";
      n_ccs++;
   }

   VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );

   for (k = 0; k < n_ccs; k++) {
      VG_(printf)("%'13llu in %'9llu: %s\n",
                  anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
   }
}
void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      if (i == VG_AR_CLIENT && !client_inited)
         continue;
      sanity_check_malloc_arena ( i );
   }
}
void VG_(describe_arena_addr) ( Addr a, AddrArenaInfo* aai )
{
   UInt        i;
   Superblock* sb;
   Arena*      arena;

   for (i = 0; i < VG_N_ARENAS; i++) {
      if (i == VG_AR_CLIENT && !client_inited)
         continue;
      arena = arenaId_to_ArenaP(i);
      sb = maybe_findSb( arena, a );
      if (sb != NULL) {
         SizeT  j;
         SizeT  b_bszB;
         Block* b = NULL;

         aai->aid = i;
         aai->name = arena->name;
         for (j = 0; j < sb->n_payload_bytes; j += mk_plain_bszB(b_bszB)) {
            b      = (Block*)&sb->payload_bytes[j];
            b_bszB = get_bszB_as_is(b);
            if (a < (Addr)b + mk_plain_bszB(b_bszB))
               break;
         }
         vg_assert (b);
         aai->block_szB = get_pszB(arena, b);
         aai->rwoffset  = a - (Addr)get_block_payload(arena, b);
         aai->free      = !is_inuse_block(b);
         return;
      }
   }
   aai->aid       = 0;
   aai->name      = NULL;
   aai->block_szB = 0;
   aai->rwoffset  = 0;
   aai->free      = False;
}
/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/
// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
   // Set the size fields and indicate not-in-use.
   set_bszB(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
   set_bszB(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Mark the bytes at b .. b+bszB-1 as being part of a block that has been shrunk.
void shrinkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;

   vg_assert(bszB >= min_useful_bszB(a));
   INNER_REQUEST(mkBhdrAccess(a,b));
   set_bszB(b, mk_inuse_bszB(bszB));
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
   INNER_REQUEST(mkBhdrNoAccess(a,b));

#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Remove a block from a given list.  Does no sanity checking.
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}
/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
void add_one_block_to_stats (Arena* a, SizeT loaned)
{
   a->stats__bytes_on_loan += loaned;
   if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
      a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
      if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
         /* next profile after 5% more growth */
         a->next_profile_at
            = (SizeT)(
                 (((ULong)a->stats__bytes_on_loan_max) * 105ULL) / 100ULL );
         if (VG_(clo_profile_heap))
            cc_analyse_alloc_arena(arenaP_to_ArenaId (a));
      }
   }
   a->stats__tot_blocks += (ULong)1;
   a->stats__tot_bytes  += (ULong)loaned;
}
1756 /* Allocate a piece of memory of req_pszB bytes on the given arena.
1757 The function may return NULL if (and only if) aid == VG_AR_CLIENT.
1758 Otherwise, the function returns a non-NULL value. */
1759 void* VG_(arena_malloc
) ( ArenaId aid
, const HChar
* cc
, SizeT req_pszB
)
1761 SizeT req_bszB
, frag_bszB
, b_bszB
;
1763 Superblock
* new_sb
= NULL
;
1767 UWord stats__nsearches
= 0;
1769 ensure_mm_init(aid
);
1770 a
= arenaId_to_ArenaP(aid
);
1772 vg_assert(req_pszB
< MAX_PSZB
);
1773 req_pszB
= align_req_pszB(req_pszB
);
1774 req_bszB
= pszB_to_bszB(a
, req_pszB
);
1776 // You must provide a cost-center name against which to charge
1777 // this allocation; it isn't optional.
1780 // Scan through all the big-enough freelists for a block.
1782 // Nb: this scanning might be expensive in some cases. Eg. if you
1783 // allocate lots of small objects without freeing them, but no
1784 // medium-sized objects, it will repeatedly scanning through the whole
1785 // list, and each time not find any free blocks until the last element.
1787 // If this becomes a noticeable problem... the loop answers the question
1788 // "where is the first nonempty list above me?" And most of the time,
1789 // you ask the same question and get the same answer. So it would be
1790 // good to somehow cache the results of previous searches.
1791 // One possibility is an array (with N_MALLOC_LISTS elements) of
1792 // shortcuts. shortcut[i] would give the index number of the nearest
1793 // larger list above list i which is non-empty. Then this loop isn't
1794 // necessary. However, we'd have to modify some section [ .. i-1] of the
1795 // shortcut array every time a list [i] changes from empty to nonempty or
1796 // back. This would require care to avoid pathological worst-case
1799 for (lno
= pszB_to_listNo(req_pszB
); lno
< N_MALLOC_LISTS
; lno
++) {
1800 UWord nsearches_this_level
= 0;
1801 b
= a
->freelist
[lno
];
1802 if (NULL
== b
) continue; // If this list is empty, try the next one.
1805 nsearches_this_level
++;
1806 if (UNLIKELY(nsearches_this_level
>= 100)
1807 && lno
< N_MALLOC_LISTS
-1) {
1808 /* Avoid excessive scanning on this freelist, and instead
1809 try the next one up. But first, move this freelist's
1810 start pointer one element along, so as to ensure that
1811 subsequent searches of this list don't endlessly
1812 revisit only these 100 elements, but in fact slowly
1813 progress through the entire list. */
1814 b
= a
->freelist
[lno
];
1815 vg_assert(b
); // this list must be nonempty!
1816 a
->freelist
[lno
] = get_next_b(b
); // step one along
1819 b_bszB
= get_bszB(b
);
1820 if (b_bszB
>= req_bszB
) goto obtained_block
; // success!
1822 if (b
== a
->freelist
[lno
]) break; // traversed entire freelist
1826 // If we reach here, no suitable block found, allocate a new superblock
1827 vg_assert(lno
== N_MALLOC_LISTS
);
1828 new_sb
= newSuperblock(a
, req_bszB
);
1829 if (NULL
== new_sb
) {
1830 // Should only fail if for client, otherwise, should have aborted
1832 vg_assert(VG_AR_CLIENT
== aid
);
1836 vg_assert(a
->sblocks_used
<= a
->sblocks_size
);
1837 if (a
->sblocks_used
== a
->sblocks_size
) {
1838 Superblock
** array
;
1839 SysRes sres
= VG_(am_mmap_anon_float_valgrind
)(sizeof(Superblock
*) *
1840 a
->sblocks_size
* 2);
1841 if (sr_isError(sres
)) {
1842 VG_(out_of_memory_NORETURN
)("arena_init", sizeof(Superblock
*) *
1843 a
->sblocks_size
* 2,
1847 array
= (Superblock
**)(Addr
)sr_Res(sres
);
1848 for (i
= 0; i
< a
->sblocks_used
; ++i
) array
[i
] = a
->sblocks
[i
];
1850 a
->sblocks_size
*= 2;
1852 VG_(debugLog
)(1, "mallocfree",
1853 "sblock array for arena `%s' resized to %lu\n",
1854 a
->name
, a
->sblocks_size
);
1857 vg_assert(a
->sblocks_used
< a
->sblocks_size
);
1859 i
= a
->sblocks_used
;
1861 if (a
->sblocks
[i
-1] > new_sb
) {
1862 a
->sblocks
[i
] = a
->sblocks
[i
-1];
1868 a
->sblocks
[i
] = new_sb
;
1871 b
= (Block
*)&new_sb
->payload_bytes
[0];
1872 lno
= pszB_to_listNo(bszB_to_pszB(a
, new_sb
->n_payload_bytes
));
1873 mkFreeBlock ( a
, b
, new_sb
->n_payload_bytes
, lno
);
1874 if (VG_(clo_profile_heap
))
1875 set_cc(b
, "admin.free-new-sb-1");
1879 // Ok, we can allocate from b, which lives in list lno.
1880 vg_assert(b
!= NULL
);
1881 vg_assert(lno
< N_MALLOC_LISTS
);
1882 vg_assert(a
->freelist
[lno
] != NULL
);
1883 b_bszB
= get_bszB(b
);
1884 // req_bszB is the size of the block we are after. b_bszB is the
1885 // size of what we've actually got. */
1886 vg_assert(b_bszB
>= req_bszB
);
   // Could we split this block and still get a useful fragment?
   // A block in an unsplittable superblock can never be split.
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)
       && (NULL == new_sb || ! new_sb->unsplittable)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-1");
      b_bszB = get_bszB(b);
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
   }

   // Update stats.
   SizeT loaned = bszB_to_pszB(a, b_bszB);
   add_one_block_to_stats (a, loaned);
   a->stats__nsearches  += (ULong)stats__nsearches;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
   // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
   // We have 2 possible options:
   // 1. The final resulting usable size.
   // 2. The initial (non-aligned) req_pszB.
   // Memcheck implements option 2 easily, as the initial requested size
   // is maintained in the mc_chunk data structure.
   // This is not as easy in the core, as there is no such structure.
   // (Note: using the aligned req_pszB is not simpler than option 2, as
   // requesting an aligned req_pszB might still be satisfied by returning
   // a (slightly) bigger block than requested, if the remaining part of
   // a free block is not big enough to make a free block by itself.)
   // Option 2 could be implemented the following way: after having called
   // VALGRIND_MALLOCLIKE_BLOCK, the non-accessible redzone just after the
   // block can be used to record the initial requested size.
   // Currently, this is not implemented => we use option 1.
   INNER_REQUEST
      (VALGRIND_MALLOCLIKE_BLOCK(v,
                                 VG_(arena_malloc_usable_size)(aid, v),
                                 a->rz_szB, False));
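
   /* Option 2 above is not implemented.  Purely as an illustration (not
      compiled in; the helper name and the choice of stashing a SizeT at
      the start of the high redzone are assumptions, not this allocator's
      design), a sketch of the idea could look like this, relying on the
      redzone being at least sizeof(SizeT) bytes: */
#if 0
   /* Record the client's original request size in the first word of the
      block's high redzone, which the client must never touch anyway. */
   static void stash_req_pszB ( ArenaId aid, void* payload, SizeT req )
   {
      SizeT  usable = VG_(arena_malloc_usable_size)(aid, payload);
      SizeT* slot   = (SizeT*)((UByte*)payload + usable);
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(slot, sizeof(SizeT)));
      *slot = req;
      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(slot, sizeof(SizeT)));
   }
#endif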

   /* For debugging/testing purposes, fill the newly allocated area
      with a definite value in an attempt to shake out any
      uninitialised uses of the data (by V core / V tools, not by the
      client).  Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
      0xAA showed no differences in the regression tests on
      amd64-linux.  Note, this is disabled by default. */
   if (0 && aid != VG_AR_CLIENT)
      VG_(memset)(v, 0xAA, (SizeT)req_pszB);

   return v;
}

// If the arena already has a deferred reclaimed superblock and that
// superblock is still reclaimable, then it is reclaimed first.
// sb then becomes the arena's new deferred superblock.
// Passing NULL as sb allows reclaiming a deferred sb without setting up
// a new deferred reclaim.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb)
{

   if (sb == NULL) {
      if (!a->deferred_reclaimed_sb)
         // no deferred sb to reclaim now, nothing to do in the future =>
         // just return.
         return;

      VG_(debugLog)(1, "mallocfree",
                    "deferred_reclaimSuperblock NULL "
                    "(prev %p) owner %s/%s\n",
                    a->deferred_reclaimed_sb,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   } else
      VG_(debugLog)(1, "mallocfree",
                    "deferred_reclaimSuperblock at %p (pszB %7lu) %s "
                    "(prev %p) owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (sb->unsplittable ? "unsplittable" : ""),
                    a->deferred_reclaimed_sb,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   if (a->deferred_reclaimed_sb
       && a->deferred_reclaimed_sb != sb) {
      // If we are now deferring a different block than the one currently
      // deferred, and the currently deferred block can still be reclaimed,
      // reclaim it now.
      // Note that we might have a re-deferred reclaim of the same block
      // with a sequence: free (causing a deferred reclaim of sb)
      //                  alloc (using a piece of memory of the deferred sb)
      //                  free of the just alloc-ed block (causing a re-defer).
      UByte*      def_sb_start;
      UByte*      def_sb_end;
      Superblock* def_sb;
      Block*      b;

      def_sb = a->deferred_reclaimed_sb;
      def_sb_start = &def_sb->payload_bytes[0];
      def_sb_end   = &def_sb->payload_bytes[def_sb->n_payload_bytes - 1];
      b = (Block *)def_sb_start;
      vg_assert (blockSane(a, b));

      // Check if the deferred_reclaimed_sb is still reclaimable.
      // If yes, we will execute the reclaim.
      if (!is_inuse_block(b)) {
         // b (at the beginning of def_sb) is not in use.
         UInt        b_listno;
         SizeT       b_bszB, b_pszB;
         b_bszB = get_bszB(b);
         b_pszB = bszB_to_pszB(a, b_bszB);
         if (b + b_bszB-1 == (Block*)def_sb_end) {
            // b (not in use) covers the full superblock.
            // => def_sb is still reclaimable
            // => execute now the reclaim of this def_sb.
            b_listno = pszB_to_listNo(b_pszB);
            unlinkBlock( a, b, b_listno );
            reclaimSuperblock (a, def_sb);
            a->deferred_reclaimed_sb = NULL;
         }
      }
   }

   // sb (possibly NULL) becomes the new deferred reclaimed superblock.
   a->deferred_reclaimed_sb = sb;
}
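
/* The re-defer sequence mentioned in the comment inside the function can be
   pictured with the following client-arena call sequence (illustrative
   only, not compiled in; "p", the cost centre string and the sizes are
   made up, and whether the deferred superblock is actually reused depends
   on the free list state): */
#if 0
   void* p = VG_(arena_malloc)(VG_AR_CLIENT, "example", 100);
   VG_(arena_free)(VG_AR_CLIENT, p);  // sb becomes fully free -> reclaim deferred
   p = VG_(arena_malloc)(VG_AR_CLIENT, "example", 100);
                                      // may reuse part of the deferred sb
   VG_(arena_free)(VG_AR_CLIENT, p);  // sb fully free again -> re-deferred
#endif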

/* b must be a free block, of size b_bszB.
   If b is followed by another free block, merge them.
   If b is preceded by another free block, merge them.
   If the merge results in the superblock being fully free,
   deferred_reclaimSuperblock the superblock. */
static void mergeWithFreeNeighbours (Arena* a, Superblock* sb,
                                     Block* b, SizeT b_bszB)
{
   UByte* sb_start;
   UByte* sb_end;
   Block* other_b;
   SizeT  other_bszB;
   UInt   b_listno;

   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b,
                      pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-2");
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b,
                      pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-3");
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

   /* If the block b just merged is the only block of the superblock sb,
      then we defer reclaim sb. */
   if ( ((Block*)sb_start == b) && (b + b_bszB-1 == (Block*)sb_end) ) {
      deferred_reclaimSuperblock (a, sb);
   }
}

void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   Block*      b;
   SizeT       b_bszB, b_pszB;
   UInt        b_listno;
   Arena*      a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL)
      return;

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(is_inuse_block(b) && blockSane(a, b));

   b_bszB   = get_bszB(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   sb       = findSb( a, b );

   a->stats__bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   if (! sb->unsplittable) {
      // Put this chunk back on a list somewhere.
      b_listno = pszB_to_listNo(b_pszB);
      mkFreeBlock( a, b, b_bszB, b_listno );
      if (VG_(clo_profile_heap))
         set_cc(b, "admin.free-1");

      /* Possibly merge b with its predecessor or successor. */
      mergeWithFreeNeighbours (a, sb, b, b_bszB);

      // Inform that ptr has been released. We give redzone size
      // 0 instead of a->rz_szB as proper accessibility is done just after.
      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));

      // We need to (re-)establish the minimum accessibility needed
      // for free list management. E.g. if block ptr has been put in a free
      // list and a neighbour block is released afterwards, the
      // "lo" and "hi" portions of the block ptr will be accessed to
      // glue the 2 blocks together.
      // We could instead mark the whole block as not accessible, and
      // transiently mark the needed lo/hi parts accessible each time they
      // are used.  This is not done, as it is quite complex for very
      // little expected additional bug detection.
      // Note that the below marks the (possibly) merged block, not the
      // block corresponding to the ptr argument.

      // First mark the whole block unaccessible.
      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
      // Then mark the relevant administrative headers as defined.
      // No need to mark the heap profile portion as defined, this is not
      // used for free blocks.
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
                                              sizeof(SizeT) + sizeof(void*)));
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
                                              - sizeof(SizeT) - sizeof(void*),
                                              sizeof(SizeT) + sizeof(void*)));
   } else {
      vg_assert(unsplittableBlockSane(a, sb, b));

      // Inform that ptr has been released. The redzone size value
      // is not relevant (so we give 0 instead of a->rz_szB)
      // as it is expected that the aspacemgr munmap will be used by
      // the outer to mark the whole superblock as unaccessible.
      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));

      // Reclaim immediately the unsplittable superblock sb.
      reclaimSuperblock (a, sb);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}

/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the free
   pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+
                             ^
                             |  base_p   (this addr must be aligned)

   <------ frag_bszB ------->
        <------------- base_pszB_act ----------->
*/
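
/* A concrete walk-through of the sizing below, with made-up numbers: if
   the rounded request is req_pszB == 112, req_alignB == 64 and
   min_useful_bszB(a) == 32, then base_pszB_req = 112 + 32 + 64 = 208.
   Any base block of at least that payload size must contain a 64-aligned
   payload address with 112 usable bytes after it and at least one
   minimally useful fragment's worth of space before it.  The round-up
   step itself is assumed to behave like the sketch below (illustrative
   only, not compiled in; "round_up_to" is a hypothetical name): */
#if 0
   static Addr round_up_to ( Addr p, SizeT alignB )
   {
      // alignB must be a non-zero power of two
      vg_assert(alignB != 0 && (alignB & (alignB - 1)) == 0);
      return (p + alignB - 1) & ~((Addr)alignB - 1);
   }
#endif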

void* VG_(arena_memalign) ( ArenaId aid, const HChar* cc,
                            SizeT req_alignB, SizeT req_pszB )
{
   SizeT  base_pszB_req, base_pszB_act, frag_bszB;
   Block  *base_b, *align_b;
   UByte  *base_p, *align_p;
   SizeT  saved_bytes_on_loan;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Check that the requested alignment is plausible: not too small, not
   // too big, and a power of two.
   if (req_alignB < VG_MIN_MALLOC_SZB) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
                  "bad alignment value %lu\n"
                  "(it is too small, below the lower limit of %d)",
                  a, req_alignB, req_pszB, req_alignB, VG_MIN_MALLOC_SZB );
      VG_(core_panic)("VG_(arena_memalign)");
   }
   if (req_alignB > 16 * 1024 * 1024) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
                  "bad alignment value %lu\n"
                  "(it is too big, larger than the upper limit of %d)",
                  a, req_alignB, req_pszB, req_alignB, 16 * 1024 * 1024 );
      VG_(core_panic)("VG_(arena_memalign)");
   }
   if (VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
                  "bad alignment value %lu\n"
                  "(it is not a power of two)",
                  a, req_alignB, req_pszB, req_alignB );
      VG_(core_panic)("VG_(arena_memalign)");
   }
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->stats__bytes_on_loan;

   /* As we will split the block given back by VG_(arena_malloc),
      we have to (temporarily) disable unsplittable for this arena,
      as unsplittable superblocks cannot be split. */
   const SizeT save_min_unsplittable_sblock_szB
      = a->min_unsplittable_sblock_szB;
   a->min_unsplittable_sblock_szB = MAX_PSZB;
   base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
   a->min_unsplittable_sblock_szB = save_min_unsplittable_sblock_szB;

   a->stats__bytes_on_loan = saved_bytes_on_loan;

   /* Give up if we couldn't allocate enough space. */
   if (NULL == base_p)
      return NULL;

   /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
      inside VG_(arena_malloc). We need to indicate it is free, then
      we need to mark it undefined to allow the below code to access it. */
   INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = get_pszB(a, base_b);

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
   if (VG_(clo_profile_heap))
      set_cc(base_b, "admin.frag-memalign-1");

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );
   if (VG_(clo_profile_heap))
      set_cc(align_b, cc);

   /* Final sanity checks. */
   vg_assert( is_inuse_block(get_payload_block(a, align_p)) );

   vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));

   a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
   if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
      a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
   }
   /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
      are updated by the call to VG_(arena_malloc) just a few lines
      above.  So we don't need to update them here. */

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );

   INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
                                           req_pszB, a->rz_szB, False));

   return align_p;
}

SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return get_pszB(a, b);
}

// Implementation of mallinfo(). There is no recent standard that defines
// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
// is as follows:
//
//     struct mallinfo  {
//                int arena;     /* total space in arena            */
//                int ordblks;   /* number of ordinary blocks       */
//                int smblks;    /* number of small blocks          */
//                int hblks;     /* number of holding blocks        */
//                int hblkhd;    /* space in holding block headers  */
//                int usmblks;   /* space in small blocks in use    */
//                int fsmblks;   /* space in free small blocks      */
//                int uordblks;  /* space in ordinary blocks in use */
//                int fordblks;  /* space in free ordinary blocks   */
//                int keepcost;  /* space penalty if keep option is used */
//         };
//
// The glibc documentation about mallinfo (which is somewhat outdated) can
// be found here:
// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
//
// See also http://bugs.kde.org/show_bug.cgi?id=160956.
//
// Regarding the implementation of VG_(mallinfo)(): we cannot return the
// whole struct as the library function does, because this is called by a
// client request.  So instead we use a pointer to do call by reference.
void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
{
   UWord  i, free_blocks, free_blocks_size;
   Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);

   // Traverse free list and calculate free blocks statistics.
   // This may seem slow but glibc works the same way.
   free_blocks_size = free_blocks = 0;
   for (i = 0; i < N_MALLOC_LISTS; i++) {
      Block* b = a->freelist[i];
      if (b == NULL) continue;
      while (True) {
         free_blocks++;
         free_blocks_size += (UWord)get_pszB(a, b);
         b = get_next_b(b);
         if (b == a->freelist[i]) break;
      }
   }

   // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
   // have a separate mmap allocator so set hblks & hblkhd to 0.
   mi->arena    = a->stats__bytes_mmaped;
   mi->ordblks  = free_blocks + VG_(free_queue_length);
   mi->smblks   = 0;
   mi->hblks    = 0;
   mi->hblkhd   = 0;
   mi->usmblks  = 0;
   mi->fsmblks  = 0;
   mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
   mi->fordblks = free_blocks_size + VG_(free_queue_volume);
   mi->keepcost = 0; // may want some value in here
}
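
/* Illustrative only (not compiled in): a client that has fetched a
   struct vg_mallinfo via the client request could derive a rough "total
   client heap" figure from the fields filled in above.  The helper name
   and the interpretation are assumptions, not an API guarantee. */
#if 0
   static SizeT approx_total_heap_bytes ( const struct vg_mallinfo* mi )
   {
      /* bytes currently on loan to the client plus bytes sitting on the
         arena free lists and the tool's free queue */
      return (SizeT)mi->uordblks + (SizeT)mi->fordblks;
   }
#endif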

SizeT VG_(arena_redzone_size) ( ArenaId aid )
{
   ensure_mm_init (VG_AR_CLIENT);
   /* ensure_mm_init will call arena_init if not yet done.
      This then ensures that the arena redzone size is properly
      initialised. */
   return arenaId_to_ArenaP(aid)->rz_szB;
}

/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, const HChar* cc,
                          SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT  size;
   void*  p;

   size = nmemb * bytes_per_memb;
   vg_assert(size >= nmemb && size >= bytes_per_memb); // check against overflow

   p = VG_(arena_malloc) ( aid, cc, size );

   VG_(memset)(p, 0, size);

   return p;
}

void* VG_(arena_realloc) ( ArenaId aid, const HChar* cc,
                           void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_pszB;
   void*  p_new;
   Block* b;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   if (NULL == ptr) {
      return VG_(arena_malloc)(aid, cc, req_pszB);
   }

   if (req_pszB == 0) {
      VG_(arena_free)(aid, ptr);
      return NULL;
   }

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   vg_assert(is_inuse_block(b));
   old_pszB = get_pszB(a, b);

   if (req_pszB <= old_pszB) {
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, cc, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   return p_new;
}

void VG_(arena_realloc_shrink) ( ArenaId aid,
                                 void* ptr, SizeT req_pszB )
{
   SizeT  req_bszB, frag_bszB, b_bszB;
   SizeT  old_pszB;
   Superblock* sb;
   Arena* a;
   Block* b;

   ensure_mm_init(aid);

   a = arenaId_to_ArenaP(aid);
   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));
   vg_assert(is_inuse_block(b));

   old_pszB = get_pszB(a, b);
   req_pszB = align_req_pszB(req_pszB);
   vg_assert(old_pszB >= req_pszB);
   if (old_pszB == req_pszB)
      return;

   sb = findSb( a, b );
   if (sb->unsplittable) {
      const UByte* sb_start = &sb->payload_bytes[0];
      const UByte* sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
      Addr  frag;

      vg_assert(unsplittableBlockSane(a, sb, b));

      frag = VG_PGROUNDUP((Addr) sb
                          + sizeof(Superblock) + pszB_to_bszB(a, req_pszB));
      frag_bszB = (Addr)sb_end - frag + 1;

      if (frag_bszB >= VKI_PAGE_SIZE) {
         SysRes sres;

         a->stats__bytes_on_loan -= old_pszB;
         b_bszB = (UByte*)frag - sb_start;
         shrinkInuseBlock(a, b, b_bszB);
         INNER_REQUEST
            (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                          old_pszB,
                                          VG_(arena_malloc_usable_size)(aid, ptr),
                                          a->rz_szB));
         /* Have the minimum admin headers needed accessibility. */
         INNER_REQUEST(mkBhdrSzAccess(a, b));
         a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);

         sb->n_payload_bytes -= frag_bszB;
         VG_(debugLog)(1, "mallocfree",
                       "shrink superblock %p to (pszB %7lu) "
                       "owner %s/%s (munmap-ing %p %7lu)\n",
                       sb, sb->n_payload_bytes,
                       a->clientmem ? "CLIENT" : "VALGRIND", a->name,
                       (void*) frag, frag_bszB);
         if (a->clientmem) {
            Bool need_discard = False;
            sres = VG_(am_munmap_client)(&need_discard,
                                         frag,
                                         frag_bszB);
            vg_assert (!need_discard);
         } else {
            sres = VG_(am_munmap_valgrind)(frag,
                                           frag_bszB);
         }
         vg_assert2(! sr_isError(sres), "shrink superblock munmap failure\n");
         a->stats__bytes_mmaped -= frag_bszB;

         vg_assert(unsplittableBlockSane(a, sb, b));
      }
   } else {
      req_bszB = pszB_to_bszB(a, req_pszB);
      b_bszB = get_bszB(b);
      frag_bszB = b_bszB - req_bszB;
      if (frag_bszB < min_useful_bszB(a))
         return;

      a->stats__bytes_on_loan -= old_pszB;
      shrinkInuseBlock(a, b, req_bszB);
      INNER_REQUEST
         (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                       old_pszB,
                                       VG_(arena_malloc_usable_size)(aid, ptr),
                                       a->rz_szB));
      /* Have the minimum admin headers needed accessibility. */
      INNER_REQUEST(mkBhdrSzAccess(a, b));

      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      /* Mark the admin headers as accessible. */
      INNER_REQUEST(mkBhdrAccess(a, &b[req_bszB]));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-2");
      /* Possibly merge &b[req_bszB] with its free neighbours. */
      mergeWithFreeNeighbours(a, sb, &b[req_bszB], frag_bszB);

      b_bszB = get_bszB(b);
      a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);
   }

   vg_assert (blockSane(a, b));
#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}
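
/* A worked example of the unsplittable-superblock shrink above, with
   made-up numbers (illustrative only): suppose the superblock starts at
   0x5000000, sizeof(Superblock) plus the shrunk block's bszB comes to
   0x2500 bytes, the last payload byte is at 0x5008fff, and the page size
   is 4096.  Then frag = VG_PGROUNDUP(0x5002500) = 0x5003000 and
   frag_bszB = 0x5008fff - 0x5003000 + 1 = 0x6000, which is at least one
   page, so the tail [0x5003000, 0x5008fff] is munmap'd and
   sb->n_payload_bytes shrinks by 0x6000. */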

/* Inline just for the wrapper VG_(strdup) below */
__inline__ HChar* VG_(arena_strdup) ( ArenaId aid, const HChar* cc,
                                      const HChar* s )
{
   SizeT  i;
   SizeT  len;
   HChar* res;

   if (s == NULL)
      return NULL;

   len = VG_(strlen)(s) + 1;
   res = VG_(arena_malloc) (aid, cc, len);

   for (i = 0; i < len; i++)
      res[i] = s[i];
   return res;
}

void* VG_(arena_perm_malloc) ( ArenaId aid, SizeT size, Int align )
{
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   align = align - 1;
   size = (size + align) & ~align;

   if (UNLIKELY(a->perm_malloc_current + size > a->perm_malloc_limit)) {
      // Get a superblock, but we will not insert it into the superblock list.
      // The superblock structure is not needed, so we will use the full
      // memory range of it. This superblock is however counted in the
      // mmaped statistics.
      Superblock* new_sb = newSuperblock (a, size);
      a->perm_malloc_limit =
         (Addr)&new_sb->payload_bytes[new_sb->n_payload_bytes - 1];

      // We do not mind starting to allocate from the beginning of the
      // superblock, as afterwards we "lose" it as a superblock.
      a->perm_malloc_current = (Addr)new_sb;
   }

   a->stats__perm_blocks += 1;
   a->stats__perm_bytes_on_loan  += size;
   add_one_block_to_stats (a, size);

   a->perm_malloc_current        += size;
   return (void*)(a->perm_malloc_current - size);
}

/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

// This function never returns NULL.
void* VG_(malloc) ( const HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_CORE, cc, nbytes );
}

void  VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_CORE, ptr );
}

void* VG_(calloc) ( const HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_CORE, cc, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( const HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_CORE, cc, ptr, size );
}

void VG_(realloc_shrink) ( void* ptr, SizeT size )
{
   VG_(arena_realloc_shrink) ( VG_AR_CORE, ptr, size );
}

HChar* VG_(strdup) ( const HChar* cc, const HChar* s )
{
   return VG_(arena_strdup) ( VG_AR_CORE, cc, s );
}

void* VG_(perm_malloc) ( SizeT size, Int align )
{
   return VG_(arena_perm_malloc) ( VG_AR_CORE, size, align );
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/