/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                               m_mallocfree.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2013 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"   // For VG_INVALID_THREADID
#include "pub_core_gdbserver.h"
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h"

#include "pub_core_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "memcheck/memcheck.h"
#endif

// #define DEBUG_MALLOC      // turn on heavyweight debugging machinery
// #define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery
/* Number and total size of blocks in free queue. Used by mallinfo(). */
Long VG_(free_queue_volume) = 0;
Long VG_(free_queue_length) = 0;

static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */
/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define N_MALLOC_LISTS     112    // do not change this

// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB              (~((SizeT)0x0))

// Each arena has a sorted array of superblocks, which expands
// dynamically.  This is its initial size.
#define SBLOCKS_SIZE_INITIAL 50
/* Layout of an in-use block:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      (payload bytes)
      red zone bytes           (depends on Arena.rz_szB, but >= sizeof(void*))
      this block total szB     (sizeof(SizeT) bytes)

   Layout of a block on the free list:

      cost center (OPTIONAL)   (VG_MIN_MALLOC_SZB bytes, only when h-p enabled)
      this block total szB     (sizeof(SizeT) bytes)
      freelist previous ptr    (sizeof(void*) bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      (payload bytes)
      excess red zone bytes    (if Arena.rz_szB > sizeof(void*))
      freelist next ptr        (sizeof(void*) bytes)
      this block total szB     (sizeof(SizeT) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB

   when heap profiling is not enabled, and

      bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + VG_MIN_MALLOC_SZB

   when it is enabled.  It follows that the minimum overhead per heap
   block for arenas used by the core is:

      32-bit platforms:  2*4 + 2*4 == 16 bytes
      64-bit platforms:  2*8 + 2*8 == 32 bytes

   when heap profiling is not enabled, and

      32-bit platforms:  2*4 + 2*4 + 8  == 24 bytes
      64-bit platforms:  2*8 + 2*8 + 16 == 48 bytes

   when it is enabled.  In all cases, extra overhead may be incurred
   when rounding the payload size up to VG_MIN_MALLOC_SZB.

   Furthermore, both size fields in the block have their least-significant
   bit set if the block is not in use, and unset if it is in use.
   (The bottom 3 or so bits are always free for this because of alignment.)
   A block size of zero is not possible, because a block always has at
   least two SizeTs and two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)

   The heap-profile cost-center field is 8 bytes even on 32 bit
   platforms.  This is so as to keep the payload field 8-aligned.  On
   a 64-bit platform, this cc-field contains a pointer to a const
   HChar*, which is the cost center name.  On 32-bit platforms, the
   pointer lives in the lower-addressed half of the field, regardless
   of the endianness of the host.
*/
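/* For illustration: with a hypothetical 16-byte redzone on a 64-bit build
   and heap profiling disabled, the relationship above works out as

      SizeT rz_szB = 16;                                 // hypothetical value
      SizeT pszB   = 100;                                // requested payload
      SizeT bszB   = pszB + 2*sizeof(SizeT) + 2*rz_szB;  // == 148

   pszB_to_bszB() and bszB_to_pszB() below are exactly this pair of
   computations, with overhead_szB(a) == 2*sizeof(SizeT) + 2*a->rz_szB. */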
// No fields are actually used in this struct, because a Block has
// many variable sized fields and so can't be accessed
// meaningfully with normal fields.  So we use access functions all
// the time.  This struct gives us a type to use, though.  Also, we
// make sizeof(Block) 1 byte so that we can do arithmetic with the
// Block* type in increments of 1!
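/* For illustration: because sizeof(Block) is 1, stepping to the physically
   next block in a superblock is plain byte arithmetic, e.g.

      Block* next = b + get_bszB(b);   // next block in the superblock

   which is the idiom used by get_predecessor_block() and the superblock
   traversal loops later in this file. */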
// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// 'unsplittable' is set to NULL if the superblock can be split, otherwise
// it is set to the address of the superblock.  An unsplittable superblock
// will contain only one allocated block.  An unsplittable superblock will
// be unmapped when its (only) allocated block is freed.
// The free space at the end of an unsplittable superblock is not used to
// make a free block.  Note that this means that an unsplittable superblock
// can have up to slightly less than 1 page of unused bytes at the end of
// the superblock.
// 'unsplittable' is used to avoid quadratic memory usage for linear
// reallocation of big structures
// (see http://bugs.kde.org/show_bug.cgi?id=250101).
// ??? unsplittable replaces 'void *padding2'.  Chose this
// ??? to avoid changing the alignment logic.  Maybe something cleaner
// ??? could be done.
// A splittable superblock can be reclaimed when all its blocks are freed:
// the reclaim of such a superblock is deferred till either another
// superblock of the same arena can be reclaimed or till a new superblock
// is needed in any arena.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there are never any unused bytes at
// the end.
typedef
   struct _Superblock {
      SizeT n_payload_bytes;
      struct _Superblock* unsplittable;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
                         VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
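/* For illustration: the intent of 'padding' is that the offset of
   payload_bytes[] from the start of the Superblock is a multiple of
   VG_MIN_MALLOC_SZB.  A check along these lines (sketch only, using the
   standard offsetof macro) would express that:

      vg_assert( 0 == offsetof(Superblock, payload_bytes)
                      % VG_MIN_MALLOC_SZB );
*/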
// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      const HChar* name;
      Bool         clientmem;        // Allocates in the client address space?
      SizeT        rz_szB;           // Red zone size in bytes
      SizeT        min_sblock_szB;   // Minimum superblock size in bytes
      SizeT        min_unsplittable_sblock_szB;
      // Minimum unsplittable superblock size in bytes.  To be marked as
      // unsplittable, a superblock must have a
      // size >= min_unsplittable_sblock_szB; such a superblock cannot be split.
      // So, to avoid big overhead, superblocks used to provide aligned
      // blocks on big alignments are splittable.
      // Unsplittable superblocks will be reclaimed when their (only)
      // allocated block is freed.
      // Smaller size superblocks are splittable and can be reclaimed when all
      // their blocks are freed.
      Block*       freelist[N_MALLOC_LISTS];
      // A dynamically expanding, ordered array of (pointers to)
      // superblocks in the arena.  If this array is expanded, which
      // is rare, the previous space it occupies is simply abandoned.
      // To avoid having to get yet another block from m_aspacemgr for
      // the first incarnation of this array, the first allocation of
      // it is within this struct.  If it has to be expanded then the
      // new space is acquired from m_aspacemgr as you would expect.
      Superblock** sblocks;
      SizeT        sblocks_size;
      SizeT        sblocks_used;
      Superblock*  sblocks_initial[SBLOCKS_SIZE_INITIAL];
      Superblock*  deferred_reclaimed_sb;

      // VG_(arena_perm_malloc) returns memory from superblocks
      // only used for permanent blocks.  No overhead.  These superblocks
      // are not stored in the sblocks array above.
      Addr         perm_malloc_current; // first byte free in perm_malloc sb.
      Addr         perm_malloc_limit;   // maximum usable byte in perm_malloc sb.

      SizeT        stats__perm_bytes_on_loan;
      SizeT        stats__perm_blocks;

      ULong        stats__nreclaim_unsplit;
      ULong        stats__nreclaim_split;
      /* total # of reclaims executed for unsplittable/splittable superblocks */
      SizeT        stats__bytes_on_loan;
      SizeT        stats__bytes_mmaped;
      SizeT        stats__bytes_on_loan_max;
      ULong        stats__tot_blocks;   /* total # blocks alloc'd */
      ULong        stats__tot_bytes;    /* total # bytes alloc'd */
      ULong        stats__nsearches;    /* total # freelist checks */
      // If profiling, when should the next profile happen at
      // (in terms of stats__bytes_on_loan_max) ?
      SizeT        next_profile_at;
      SizeT        stats__bytes_mmaped_max;
   }
   Arena;
/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

#define SIZE_T_0x1      ((SizeT)0x1)

static const char* probably_your_fault =
   "This is probably caused by your program erroneously writing past the\n"
   "end of a heap block and corrupting heap metadata.  If you fix any\n"
   "invalid writes reported by Memcheck, this assertion failure will\n"
   "probably go away.  Please try that before reporting this as a bug.\n";
// Mark a bszB as in-use, mark it as not in-use, and remove the in-use/free
// attribute, respectively.
static __inline__
SizeT mk_inuse_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}
static __inline__
SizeT mk_free_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB | SIZE_T_0x1;
}
static __inline__
SizeT mk_plain_bszB ( SizeT bszB )
{
   vg_assert2(bszB != 0, probably_your_fault);
   return bszB & (~SIZE_T_0x1);
}

// Forward definition.
static
void ensure_mm_init ( ArenaId aid );

// return either 0 or sizeof(ULong) depending on whether or not
// heap profiling is engaged
#define hp_overhead_szB() set_at_init_hp_overhead_szB
static SizeT set_at_init_hp_overhead_szB = -1000000;
// startup value chosen to very likely cause a problem if used before
// a proper value is given by ensure_mm_init.
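/* For illustration: once ensure_mm_init() has run, hp_overhead_szB() is
   VG_MIN_MALLOC_SZB if --profile-heap=yes was given and 0 otherwise, so

      SizeT lo_ovh = hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;

   is the byte offset of the payload from the start of a block -- which is
   exactly what overhead_szB_lo() below computes. */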
//---------------------------------------------------------------------------

// Get a block's size as stored, ie with the in-use/free attribute.
static __inline__
SizeT get_bszB_as_is ( Block* b )
{
   UByte* b2      = (UByte*)b;
   SizeT  bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
   SizeT  bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
   vg_assert2(bszB_lo == bszB_hi,
      "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
      (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
   return bszB_lo;
}

// Get a block's plain size, ie. remove the in-use/free attribute.
static __inline__
SizeT get_bszB ( Block* b )
{
   return mk_plain_bszB(get_bszB_as_is(b));
}

// Set the size fields of a block.  bszB may have the in-use/free attribute.
static __inline__
void set_bszB ( Block* b, SizeT bszB )
{
   UByte* b2 = (UByte*)b;
   *(SizeT*)&b2[0 + hp_overhead_szB()]               = bszB;
   *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
}
//---------------------------------------------------------------------------

// Does this block have the in-use attribute?
static __inline__
Bool is_inuse_block ( Block* b )
{
   SizeT bszB = get_bszB_as_is(b);
   vg_assert2(bszB != 0, probably_your_fault);
   return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
//---------------------------------------------------------------------------

// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
SizeT overhead_szB_lo ( Arena* a )
{
   return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
}
static __inline__
SizeT overhead_szB_hi ( Arena* a )
{
   return a->rz_szB + sizeof(SizeT);
}
static __inline__
SizeT overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}

//---------------------------------------------------------------------------

// Return the minimum bszB for a block in this arena.  Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
SizeT min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

//---------------------------------------------------------------------------

// Convert payload size <--> block size (both in bytes).
static __inline__
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
   return pszB + overhead_szB(a);
}
static __inline__
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
   vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
   return bszB - overhead_szB(a);
}

//---------------------------------------------------------------------------

// Get a block's payload size.
static __inline__
SizeT get_pszB ( Arena* a, Block* b )
{
   return bszB_to_pszB(a, get_bszB(b));
}

//---------------------------------------------------------------------------
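/* For illustration: the two conversions above are exact inverses for any
   valid block, e.g.

      SizeT pszB = 64;                       // example payload size
      SizeT bszB = pszB_to_bszB(a, pszB);
      vg_assert( bszB_to_pszB(a, bszB) == pszB );

   where 'a' is any initialised Arena. */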
// Given the addr of a block, return the addr of its payload, and vice versa.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[ -overhead_szB_lo(a) ];
}

//---------------------------------------------------------------------------
// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
}

//---------------------------------------------------------------------------
// Set and get the cost-center field of a block.
static __inline__
void set_cc ( Block* b, const HChar* cc )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   *(const HChar**)&b2[0] = cc;
}
static __inline__
const HChar* get_cc ( Block* b )
{
   UByte* b2 = (UByte*)b;
   vg_assert( VG_(clo_profile_heap) );
   return *(const HChar**)&b2[0];
}

//---------------------------------------------------------------------------
// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2   = (UByte*)b;
   SizeT  bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
   return (Block*)&b2[-bszB];
}

//---------------------------------------------------------------------------
// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
}
static __inline__
UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
}
#if defined(ENABLE_INNER_CLIENT_REQUEST)
/* When running as an inner, the block headers before and after
   (see 'Layout of an in-use block:' above) are made non-accessible
   by VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK
   to allow the outer to detect block overrun.
   The below two functions are used when these headers must be
   temporarily accessed. */
static void mkBhdrAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT) + a->rz_szB);
   VALGRIND_MAKE_MEM_DEFINED (b + get_bszB(b) - a->rz_szB - sizeof(SizeT),
                              a->rz_szB + sizeof(SizeT));
}

/* Mark block hdr as not accessible.
   !!! Currently, we do not mark the cost center and szB fields inaccessible
   as these are accessed at too many places. */
static void mkBhdrNoAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_NOACCESS (b + hp_overhead_szB() + sizeof(SizeT),
                               a->rz_szB);
   VALGRIND_MAKE_MEM_NOACCESS (b + get_bszB(b) - sizeof(SizeT) - a->rz_szB,
                               a->rz_szB);
}

/* Make the cc+szB fields accessible. */
static void mkBhdrSzAccess( Arena* a, Block* b )
{
   VALGRIND_MAKE_MEM_DEFINED (b,
                              hp_overhead_szB() + sizeof(SizeT));
   /* We cannot use get_bszB(b), as this reads the 'hi' szB we want
      to mark accessible.  So, we only access the 'lo' szB. */
   SizeT bszB_lo = mk_plain_bszB(*(SizeT*)&b[0 + hp_overhead_szB()]);
   VALGRIND_MAKE_MEM_DEFINED (b + bszB_lo - sizeof(SizeT),
                              sizeof(SizeT));
}
#endif
/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB    1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

static ArenaId arenaP_to_ArenaId ( Arena *a )
{
   ArenaId arena = a - vg_arena;
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return arena;
}
// Initialise an arena.  rz_szB is the (default) minimum redzone size;
// it might be overridden by VG_(clo_redzone_size) or VG_(clo_core_redzone_size),
// and it might be made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, const HChar* name, SizeT rz_szB,
                  SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
   SizeT  i;
   Arena* a = arenaId_to_ArenaP(aid);

   // Ensure default redzones are a reasonable size.
   vg_assert(rz_szB <= MAX_REDZONE_SZB);

   /* Override the default redzone size if a clo value was given.
      Note that the clo value can be significantly bigger than MAX_REDZONE_SZB
      to allow the user to chase horrible bugs using up to 1 page
      of protection. */
   if (VG_AR_CLIENT == aid) {
      if (VG_(clo_redzone_size) != -1)
         rz_szB = VG_(clo_redzone_size);
   } else {
      if (VG_(clo_core_redzone_size) != rz_szB)
         rz_szB = VG_(clo_core_redzone_size);
   }

   // Redzones must always be at least the size of a pointer, for holding the
   // prev/next pointer (see the layout details at the top of this file).
   if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_SZB.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));

   // Here we have established the effective redzone size.

   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   a->min_sblock_szB = min_sblock_szB;
   a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
   for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;

   a->sblocks                  = & a->sblocks_initial[0];
   a->sblocks_size             = SBLOCKS_SIZE_INITIAL;
   a->sblocks_used             = 0;
   a->deferred_reclaimed_sb    = 0;
   a->perm_malloc_current      = 0;
   a->perm_malloc_limit        = 0;
   a->stats__perm_bytes_on_loan= 0;
   a->stats__perm_blocks       = 0;
   a->stats__nreclaim_unsplit  = 0;
   a->stats__nreclaim_split    = 0;
   a->stats__bytes_on_loan     = 0;
   a->stats__bytes_mmaped      = 0;
   a->stats__bytes_on_loan_max = 0;
   a->stats__bytes_mmaped_max  = 0;
   a->stats__tot_blocks        = 0;
   a->stats__tot_bytes         = 0;
   a->stats__nsearches         = 0;
   a->next_profile_at          = 25 * 1000 * 1000;
   vg_assert(sizeof(a->sblocks_initial)
             == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
}
/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
                   "%8s: %8lu/%8lu max/curr mmap'd, "
                   "%llu/%llu unsplit/split sb unmmap'd, "
                   "%8lu/%8lu max/curr, "
                   "%10llu/%10llu totalloc-blocks/bytes,"
                   " %10llu searches %lu rzB\n",
                   a->name,
                   a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
                   a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
                   a->stats__bytes_on_loan_max,
                   a->stats__bytes_on_loan,
                   a->stats__tot_blocks, a->stats__tot_bytes,
                   a->stats__nsearches,
                   a->rz_szB
      );
   }
}

void VG_(print_arena_cc_analysis) ( void )
{
   UInt i;
   vg_assert( VG_(clo_profile_heap) );
   for (i = 0; i < VG_N_ARENAS; i++) {
      cc_analyse_alloc_arena(i);
   }
}
/* This library is self-initialising, as it makes this more self-contained,
   less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things are
   correctly initialised.

   We initialise the client arena separately (and later) because the core
   must do non-client allocation before the tool has a chance to set the
   client arena's redzone size. */
static Bool     client_inited = False;
static Bool  nonclient_inited = False;

static
void ensure_mm_init ( ArenaId aid )
{
   static SizeT client_rz_szB = 8;     // default: be paranoid
   /* We use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*; they could be made bigger to ensure
      alignment.  Eg. with 8 byte alignment, on 32-bit machines 4 stays as
      4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
      stays as 16 --- the extra 4 bytes in both are accounted for by the
      larger prev/next ptr. */
   if (VG_AR_CLIENT == aid) {
      Int ar_client_sbszB;

      if (client_inited) {
         // This assertion ensures that a tool cannot try to change the client
         // redzone size with VG_(needs_malloc_replacement)() after this module
         // has done its first allocation from the client arena.
         if (VG_(needs).malloc_replacement)
            vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
         return;
      }

      // Check and set the client arena redzone size
      if (VG_(needs).malloc_replacement) {
         client_rz_szB = VG_(tdict).tool_client_redzone_szB;
         if (client_rz_szB > MAX_REDZONE_SZB) {
            VG_(printf)( "\nTool error:\n"
                         "  specified redzone size is too big (%llu)\n",
                         (ULong)client_rz_szB);
         }
      }

      // Initialise the client arena.  On all platforms,
      // increasing the superblock size reduces the number of superblocks
      // in the client arena, which makes findSb cheaper.
      ar_client_sbszB = 4194304;
      // superblocks with a size > ar_client_sbszB will be unsplittable
      // (unless used for providing memalign-ed blocks).
      arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,
                   ar_client_sbszB, ar_client_sbszB+1);
      client_inited = True;

   } else {
      if (nonclient_inited) {
         return;
      }
      set_at_init_hp_overhead_szB =
         VG_(clo_profile_heap)  ? VG_MIN_MALLOC_SZB  : 0;
      // Initialise the non-client arenas
      // Similarly to the client arena, big allocations will be unsplittable.
      arena_init ( VG_AR_CORE,      "core",     CORE_REDZONE_DEFAULT_SZB,
                   4194304, 4194304+1 );
      arena_init ( VG_AR_DINFO,     "dinfo",    CORE_REDZONE_DEFAULT_SZB,
                   1048576, 1048576+1 );
      arena_init ( VG_AR_DEMANGLE,  "demangle", CORE_REDZONE_DEFAULT_SZB,
      arena_init ( VG_AR_TTAUX,     "ttaux",    CORE_REDZONE_DEFAULT_SZB,
      nonclient_inited = True;
   }

#  ifdef DEBUG_MALLOC
   VG_(printf)("ZZZ1\n");
   VG_(sanity_check_malloc_all)();
   VG_(printf)("ZZZ2\n");
#  endif
}
/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

__attribute__((noreturn))
void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB )
{
   static Int outputTrial = 0;
   // We try once to output the full memory state followed by the below message.
   // If that fails (due to out of memory during the first trial), we try to
   // just output the below message.
   // And then we abandon.

   ULong tot_alloc = VG_(am_get_anonsize_total)();
   const HChar* s1 =
      "    Valgrind's memory management: out of memory:\n"
      "       %s's request for %llu bytes failed.\n"
      "       %llu bytes have already been allocated.\n"
      "    Valgrind cannot continue.  Sorry.\n\n"
      "    There are several possible reasons for this.\n"
      "    - You have some kind of memory limit in place.  Look at the\n"
      "      output of 'ulimit -a'.  Is there a limit on the size of\n"
      "      virtual memory or address space?\n"
      "    - You have run out of swap space.\n"
      "    - Valgrind has a bug.  If you think this is the case or you are\n"
      "      not sure, please let us know and we'll try to fix it.\n"
      "    Please note that programs can take substantially more memory than\n"
      "    normal when running under Valgrind tools, eg. up to twice or\n"
      "    more, depending on the tool.  On a 64-bit machine, Valgrind\n"
      "    should be able to make use of up to 32GB memory.  On a 32-bit\n"
      "    machine, Valgrind should be able to use all the memory available\n"
      "    to a single process, up to 4GB if that's how you have your\n"
      "    kernel configured.  Most 32-bit Linux setups allow a maximum of\n"
      "    3GB per process.\n\n"
      "    Whatever the reason, Valgrind cannot continue.  Sorry.\n";
   if (outputTrial <= 1) {
      if (outputTrial == 0) {
         outputTrial++;
         // First print the memory stats with the aspacemgr data.
         VG_(am_show_nsegments) (0, "out_of_memory");
         VG_(print_all_arena_stats) ();
         if (VG_(clo_profile_heap))
            VG_(print_arena_cc_analysis) ();
         // And then print some other information that might help.
         VG_(print_all_stats) (False, /* Memory stats */
                               True /* Tool stats */);
         VG_(show_sched_status) (True,  // host_stacktrace
                                 True,  // valgrind_stack_usage
                                 True); // exited_threads
         /* In case we are an inner valgrind, ask the outer to report
            its memory state in its log output. */
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.set log_output"));
         INNER_REQUEST(VALGRIND_MONITOR_COMMAND("v.info memory aspacemgr"));
      }
      VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
   } else {
      VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
   }

   VG_(exit)(1);
}
// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, SizeT align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
// Forward definition.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb );
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
   Superblock* sb;
   SysRes      sres;
   Bool        unsplittable;
   ArenaId     aid;

   // A new superblock is needed for arena a.  We will execute the deferred
   // reclaim in all arenas in order to minimise fragmentation and
   // peak memory usage.
   for (aid = 0; aid < VG_N_ARENAS; aid++) {
      Arena* arena = arenaId_to_ArenaP(aid);
      if (arena->deferred_reclaimed_sb != NULL)
         deferred_reclaimSuperblock (arena, NULL);
   }

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   cszB = VG_PGROUNDUP(cszB);

   if (cszB >= a->min_unsplittable_sblock_szB)
      unsplittable = True;
   else
      unsplittable = False;

   if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sres = VG_(am_mmap_anon_float_client)
                ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
      if (sr_isError(sres))
         return 0;
      sb = (Superblock*)(AddrH)sr_Res(sres);
      // Mark this segment as containing client heap.  The leak
      // checker needs to be able to identify such segments so as not
      // to use them as sources of roots during leak checks.
      VG_(am_set_segment_isCH_if_SkAnonC)( VG_(am_find_nsegment)( (Addr)sb ) );
   } else {
      // non-client allocation -- abort if it fails
      sres = VG_(am_mmap_anon_float_valgrind)( cszB );
      if (sr_isError(sres)) {
         VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
         /* NOTREACHED */
         sb = NULL; /* keep gcc happy */
      } else {
         sb = (Superblock*)(AddrH)sr_Res(sres);
      }
   }
   vg_assert(NULL != sb);
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(sb, cszB));
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   sb->unsplittable = (unsplittable ? sb : NULL);
   a->stats__bytes_mmaped += cszB;
   if (a->stats__bytes_mmaped > a->stats__bytes_mmaped_max)
      a->stats__bytes_mmaped_max = a->stats__bytes_mmaped;
   VG_(debugLog)(1, "mallocfree",
                 "newSuperblock at %p (pszB %7ld) %s owner %s/%s\n",
                 sb, sb->n_payload_bytes,
                 (unsplittable ? "unsplittable" : ""),
                 a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   return sb;
}
// Reclaims the given superblock:
//  * removes sb from the arena sblocks list.
//  * munmaps the superblock segment.
static
void reclaimSuperblock ( Arena* a, Superblock* sb )
{
   SysRes sres;
   SizeT  cszB;
   SizeT  i, j;

   VG_(debugLog)(1, "mallocfree",
                 "reclaimSuperblock at %p (pszB %7ld) %s owner %s/%s\n",
                 sb, sb->n_payload_bytes,
                 (sb->unsplittable ? "unsplittable" : ""),
                 a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   // Take into account admin bytes in the Superblock.
   cszB = sizeof(Superblock) + sb->n_payload_bytes;

   // removes sb from the superblock list.
   for (i = 0; i < a->sblocks_used; i++) {
      if (a->sblocks[i] == sb)
         break;
   }
   vg_assert(i >= 0 && i < a->sblocks_used);
   for (j = i; j < a->sblocks_used; j++)
      a->sblocks[j] = a->sblocks[j+1];
   a->sblocks_used--;
   a->sblocks[a->sblocks_used] = NULL;
   // paranoia: NULLify ptr to reclaimed sb or NULLify copy of ptr to last sb.

   a->stats__bytes_mmaped -= cszB;
   if (sb->unsplittable)
      a->stats__nreclaim_unsplit++;
   else
      a->stats__nreclaim_split++;

   // Now that the sb is removed from the list, munmap its space.
   if (a->clientmem) {
      // reclaimable client allocation
      Bool need_discard = False;
      sres = VG_(am_munmap_client)(&need_discard, (Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock client munmap failure\n");
      /* We somewhat help the client by discarding the range.
         Note however that if the client has JITted some code in
         a small block that was freed, we do not provide this
         help. */
      /* JRS 2011-Sept-26: it would be nice to move the discard
         outwards somewhat (in terms of calls) so as to make it easier
         to verify that there will be no nonterminating recursive set
         of calls as a result of calling VG_(discard_translations).
         Another day, perhaps. */
      VG_(discard_translations) ((Addr) sb, cszB, "reclaimSuperblock");
   } else {
      // reclaimable non-client allocation
      sres = VG_(am_munmap_valgrind)((Addr) sb, cszB);
      vg_assert2(! sr_isError(sres), "superblock valgrind munmap failure\n");
   }
}
// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock* sb;
      SizeT pos = min + (max - min)/2;

      vg_assert(pos >= 0 && pos < a->sblocks_used);
      sb = a->sblocks[pos];
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
      {
         return sb;
      } else if ((Block*)&sb->payload_bytes[0] <= b) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
               b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}
// Find the superblock containing the given address.
// If superblock not found, return NULL.
static
Superblock* maybe_findSb ( Arena* a, Addr ad )
{
   SizeT min = 0;
   SizeT max = a->sblocks_used;

   while (min <= max) {
      Superblock* sb;
      SizeT pos = min + (max - min)/2;
      if (pos < 0 || pos >= a->sblocks_used)
         return NULL;
      sb = a->sblocks[pos];
      if ((Addr)&sb->payload_bytes[0] <= ad
          && ad < (Addr)&sb->payload_bytes[sb->n_payload_bytes]) {
         return sb;
      } else if ((Addr)&sb->payload_bytes[0] <= ad) {
         min = pos + 1;
      } else {
         max = pos - 1;
      }
   }
   return NULL;
}
/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not the block size.

// Convert a payload size in bytes to a freelist number.
static
UInt pszB_to_listNo ( SizeT pszB )
{
   SizeT n = pszB / VG_MIN_MALLOC_SZB;
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);

   // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
   // The final 48 hold bigger blocks.
   if (n < 64)   return (UInt)n;
   /* Exponential slope up, factor 1.05 */
   if (n < 67)   return 64;
   if (n < 70)   return 65;
   if (n < 74)   return 66;
   if (n < 77)   return 67;
   if (n < 81)   return 68;
   if (n < 85)   return 69;
   if (n < 90)   return 70;
   if (n < 94)   return 71;
   if (n < 99)   return 72;
   if (n < 104)  return 73;
   if (n < 109)  return 74;
   if (n < 114)  return 75;
   if (n < 120)  return 76;
   if (n < 126)  return 77;
   if (n < 133)  return 78;
   if (n < 139)  return 79;
   /* Exponential slope up, factor 1.10 */
   if (n < 153)  return 80;
   if (n < 169)  return 81;
   if (n < 185)  return 82;
   if (n < 204)  return 83;
   if (n < 224)  return 84;
   if (n < 247)  return 85;
   if (n < 272)  return 86;
   if (n < 299)  return 87;
   if (n < 329)  return 88;
   if (n < 362)  return 89;
   if (n < 398)  return 90;
   if (n < 438)  return 91;
   if (n < 482)  return 92;
   if (n < 530)  return 93;
   if (n < 583)  return 94;
   if (n < 641)  return 95;
   /* Exponential slope up, factor 1.20 */
   if (n < 770)  return 96;
   if (n < 924)  return 97;
   if (n < 1109) return 98;
   if (n < 1331) return 99;
   if (n < 1597) return 100;
   if (n < 1916) return 101;
   if (n < 2300) return 102;
   if (n < 2760) return 103;
   if (n < 3312) return 104;
   if (n < 3974) return 105;
   if (n < 4769) return 106;
   if (n < 5723) return 107;
   if (n < 6868) return 108;
   if (n < 8241) return 109;
   if (n < 9890) return 110;
   return 111;
}
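/* For illustration, assuming VG_MIN_MALLOC_SZB == 8 (so n == pszB/8):

      pszB_to_listNo(8)    == 1    // n ==   1, linear region
      pszB_to_listNo(504)  == 63   // n ==  63, last linear list
      pszB_to_listNo(512)  == 64   // n ==  64, first "1.05 slope" list
      pszB_to_listNo(1024) == 78   // n == 128, falls in [126, 133)
*/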
// What is the minimum payload size for a given list?
static
SizeT listNo_to_pszB_min ( UInt listNo )
{
   /* Repeatedly computing this function at every request is
      expensive.  Hence at the first call just cache the result for
      every possible argument. */
   static SizeT cache[N_MALLOC_LISTS];
   static Bool  cache_valid = False;
   if (!cache_valid) {
      UInt  i;
      SizeT pszB = 0;
      for (i = 0; i < N_MALLOC_LISTS; i++) {
         while (pszB_to_listNo(pszB) < i)
            pszB += VG_MIN_MALLOC_SZB;
         cache[i] = pszB;
      }
      cache_valid = True;
   }
   /* Return cached answer. */
   vg_assert(listNo <= N_MALLOC_LISTS);
   return cache[listNo];
}

// What is the maximum payload size for a given list?
static
SizeT listNo_to_pszB_max ( UInt listNo )
{
   vg_assert(listNo <= N_MALLOC_LISTS);
   if (listNo == N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}
/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, UInt lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   UInt   i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;

   // This loop bound was 20 for a long time, but experiments showed that
   // reducing it to 10 gave the same result in all the tests, and 5 got the
   // same result in 85--100% of cases.  And it's called often enough to be
   // noticeable in programs that allocate a lot.
   for (i = 0; i < 5; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
#     endif
      a->freelist[lno] = p_best;
   }
}
/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define REDZONE_LO_MASK    0x31
#define REDZONE_HI_MASK    0x7c
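/* For illustration: the redzone fill pattern used by mkInuseBlock() and
   checked by blockSane() below is derived from the block address, e.g.
   for a block at address b and redzone byte number i:

      UByte expected_lo = (UByte)(((Addr)b & 0xff) ^ REDZONE_LO_MASK);
      UByte expected_hi = (UByte)(((Addr)b & 0xff) ^ REDZONE_HI_MASK);

   so an overwrite of the redzone is very likely to break the pattern and
   be reported by blockSane(). */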
// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   UInt i;
   // The lo and hi size fields will be checked (indirectly) by the call
   // to get_rz_hi_byte().
   if (!a->clientmem && is_inuse_block(b)) {
      // In the inner, for memcheck's sake, temporarily mark the redzone accessible.
      INNER_REQUEST(mkBhdrAccess(a,b));
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(b, i) !=
             (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
      INNER_REQUEST(mkBhdrNoAccess(a,b));
   }
   return True;
#  undef BLEAT
}
// Sanity checks on a Block inside an unsplittable superblock
static
Bool unsplittableBlockSane ( Arena* a, Superblock *sb, Block* b )
{
#  define BLEAT(str) VG_(printf)("unsplittableBlockSane: fail -- %s\n",str)
   Block* other_b;
   UByte* sb_start;
   UByte* sb_end;

   if (!blockSane (a, b))
      {BLEAT("blockSane");return False;}

   if (sb->unsplittable != sb)
      {BLEAT("unsplittable");return False;}

   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // b must be the first block (i.e. no unused bytes at the beginning)
   if ((Block*)sb_start != b)
      {BLEAT("sb_start");return False;}

   // b must be the last block (i.e. no unused bytes at the end)
   other_b = b + get_bszB(b);
   if (other_b-1 != (Block*)sb_end)
      {BLEAT("sb_end");return False;}

   return True;
#  undef BLEAT
}
// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   UInt i, j, blockno = 1;
   SizeT b_bszB;

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock* sb = a->sblocks[j];

      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p %s, sb->n_pl_bs = %lu\n",
                   blockno++, sb, (sb->unsplittable ? "unsplittable" : ""),
                   sb->n_payload_bytes);
      for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
         Block* b = (Block*)&sb->payload_bytes[i];
         b_bszB   = get_bszB(b);
         VG_(printf)( "   block at %d, bszB %lu: ", i, b_bszB );
         VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
   }
   VG_(printf)( "end of superblocks\n\n" );
}
// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt        i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt        blockctr_sb_free, listno;
   SizeT       b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool        thisFree, lastWasFree, sblockarrOK;
   Block*      b;
   Block*      b_prev;
   SizeT       arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock* sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu):  BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   arena_bytes_on_loan += a->stats__perm_bytes_on_loan;

   if (arena_bytes_on_loan != a->stats__bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %lu, "
                   "arena_bytes_on_loan %lu: "
                   "MISMATCH\n", a->stats__bytes_on_loan, arena_bytes_on_loan);
#     endif
      BOMB;
   }
   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL)  continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7ld mmap, %7ld loan\n",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->stats__bytes_mmaped, a->stats__bytes_on_loan);
#  undef BOMB
}
#define N_AN_CCS 1000

typedef struct {
   ULong        nBytes;
   ULong        nBlocks;
   const HChar* cc;
} AnCC;

static AnCC anCCs[N_AN_CCS];

/* Sorting by decreasing cost centre nBytes, to have the biggest
   cost centres at the top. */
static Int cmp_AnCC_by_vol ( const void* v1, const void* v2 ) {
   const AnCC* ancc1 = v1;
   const AnCC* ancc2 = v2;
   if (ancc1->nBytes < ancc2->nBytes) return 1;
   if (ancc1->nBytes > ancc2->nBytes) return -1;
   return 0;
}
static void cc_analyse_alloc_arena ( ArenaId aid )
{
   Word         i, j, k;
   Arena*       a;
   Block*       b;
   Bool         thisFree, lastWasFree;
   SizeT        b_bszB;

   const HChar* cc;
   UInt         n_ccs = 0;

   a = arenaId_to_ArenaP(aid);
   if (a->name == NULL) {
      /* arena is not in use, is not initialised and will fail the
         sanity check that follows. */
      return;
   }

   sanity_check_malloc_arena(aid);

   VG_(printf)(
      "-------- Arena \"%s\": %lu/%lu max/curr mmap'd, "
      "%llu/%llu unsplit/split sb unmmap'd, "
      "%lu/%lu max/curr on_loan %lu rzB --------\n",
      a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
      a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
      a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
      a->rz_szB
   );

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock* sb = a->sblocks[j];
      lastWasFree = False;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu):  BAD\n", sb, i, b_bszB );
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
         }
         lastWasFree = thisFree;

         if (thisFree) continue;

         if (VG_(clo_profile_heap))
            cc = get_cc(b);
         else
            cc = "(--profile-heap=yes for details)";
         if (0)
            VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
                        (Int)(!thisFree),
                        (Int)bszB_to_pszB(a, b_bszB),
                        cc);

         for (k = 0; k < n_ccs; k++) {
            vg_assert(anCCs[k].cc);
            if (0 == VG_(strcmp)(cc, anCCs[k].cc))
               break;
         }
         vg_assert(k >= 0 && k <= n_ccs);

         if (k == n_ccs) {
            vg_assert(n_ccs < N_AN_CCS-1);
            n_ccs++;
            anCCs[k].nBytes  = 0;
            anCCs[k].nBlocks = 0;
            anCCs[k].cc      = cc;
         }

         vg_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
         anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
         anCCs[k].nBlocks++;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
      }
   }

   if (a->stats__perm_bytes_on_loan > 0) {
      vg_assert(n_ccs < N_AN_CCS-1);
      anCCs[n_ccs].nBytes  = a->stats__perm_bytes_on_loan;
      anCCs[n_ccs].nBlocks = a->stats__perm_blocks;
      anCCs[n_ccs].cc      = "perm_malloc";
      n_ccs++;
   }

   VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );

   for (k = 0; k < n_ccs; k++) {
      VG_(printf)("%'13llu in %'9llu: %s\n",
                  anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
   }
}
void VG_(sanity_check_malloc_all) ( void )
{
   UInt i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      if (i == VG_AR_CLIENT && !client_inited)
         continue;
      sanity_check_malloc_arena ( i );
   }
}
void VG_(describe_arena_addr) ( Addr a, AddrArenaInfo* aai )
{
   UInt        i;
   Superblock* sb;
   Arena*      arena;

   for (i = 0; i < VG_N_ARENAS; i++) {
      if (i == VG_AR_CLIENT && !client_inited)
         continue;
      arena = arenaId_to_ArenaP(i);
      sb = maybe_findSb( arena, a );
      if (sb != NULL) {
         SizeT  j;
         SizeT  b_bszB;
         Block* b = NULL;

         aai->name = arena->name;
         for (j = 0; j < sb->n_payload_bytes; j += mk_plain_bszB(b_bszB)) {
            b      = (Block*)&sb->payload_bytes[j];
            b_bszB = get_bszB_as_is(b);
            if (a < (Addr)b + mk_plain_bszB(b_bszB))
               break;
         }
         aai->block_szB = get_pszB(arena, b);
         aai->rwoffset  = a - (Addr)get_block_payload(arena, b);
         aai->free      = !is_inuse_block(b);
         return;
      }
   }
}
/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.
static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
   // Set the size fields and indicate not-in-use.
   set_bszB(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(b, bszB));
   set_bszB(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Mark the bytes at b .. b+bszB-1 as being part of a block that has been shrunk.
static
void shrinkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;

   vg_assert(bszB >= min_useful_bszB(a));
   INNER_REQUEST(mkBhdrAccess(a,b));
   set_bszB(b, mk_inuse_bszB(bszB));
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
   INNER_REQUEST(mkBhdrNoAccess(a,b));

#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}
/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
SizeT align_req_pszB ( SizeT req_pszB )
{
   SizeT n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
static
void add_one_block_to_stats (Arena* a, SizeT loaned)
{
   a->stats__bytes_on_loan += loaned;
   if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
      a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
      if (a->stats__bytes_on_loan_max >= a->next_profile_at) {
         /* next profile after 5% more growth */
         a->next_profile_at
            = (SizeT)(
                 (((ULong)a->stats__bytes_on_loan_max) * 105ULL) / 100ULL );
         if (VG_(clo_profile_heap))
            cc_analyse_alloc_arena(arenaP_to_ArenaId (a));
      }
   }
   a->stats__tot_blocks += (ULong)1;
   a->stats__tot_bytes  += (ULong)loaned;
}
/* Allocate a piece of memory of req_pszB bytes on the given arena.
   The function may return NULL if (and only if) aid == VG_AR_CLIENT.
   Otherwise, the function returns a non-NULL value. */
void* VG_(arena_malloc) ( ArenaId aid, const HChar* cc, SizeT req_pszB )
{
   SizeT       req_bszB, frag_bszB, b_bszB;
   UInt        lno;
   SizeT       i;
   Superblock* new_sb = NULL;
   Block*      b = NULL;
   Arena*      a;
   void*       v;
   UWord       stats__nsearches = 0;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Scan through all the big-enough freelists for a block.
   //
   // Nb: this scanning might be expensive in some cases.  Eg. if you
   // allocate lots of small objects without freeing them, but no
   // medium-sized objects, it will repeatedly scan through the whole
   // list, and each time not find any free blocks until the last element.
   //
   // If this becomes a noticeable problem... the loop answers the question
   // "where is the first nonempty list above me?"  And most of the time,
   // you ask the same question and get the same answer.  So it would be
   // good to somehow cache the results of previous searches.
   // One possibility is an array (with N_MALLOC_LISTS elements) of
   // shortcuts.  shortcut[i] would give the index number of the nearest
   // larger list above list i which is non-empty.  Then this loop isn't
   // necessary.  However, we'd have to modify some section [ .. i-1] of the
   // shortcut array every time a list [i] changes from empty to nonempty or
   // back.  This would require care to avoid pathological worst-case
   // behaviour.
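/* A minimal sketch of the shortcut-array idea described above.  The names
   (shortcut, shortcut_rebuild) and the rebuild-everything update policy are
   hypothetical, and a real version would need one table per arena:

      // shortcut[i] == smallest j >= i such that a->freelist[j] != NULL,
      // or N_MALLOC_LISTS if every list from i upwards is empty.
      static UInt shortcut[N_MALLOC_LISTS];

      static void shortcut_rebuild ( Arena* a )
      {
         Int  i;
         UInt nearest = N_MALLOC_LISTS;
         for (i = N_MALLOC_LISTS-1; i >= 0; i--) {
            if (a->freelist[i] != NULL) nearest = (UInt)i;
            shortcut[i] = nearest;
         }
      }

   The allocation path would then start its search at shortcut[lno] instead
   of lno; the cost is that some prefix of shortcut[] must be refreshed
   whenever a list switches between empty and non-empty. */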
   for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
      UWord nsearches_this_level = 0;
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         stats__nsearches++;
         nsearches_this_level++;
         if (UNLIKELY(nsearches_this_level >= 100)
             && lno < N_MALLOC_LISTS-1) {
            /* Avoid excessive scanning on this freelist, and instead
               try the next one up.  But first, move this freelist's
               start pointer one element along, so as to ensure that
               subsequent searches of this list don't endlessly
               revisit only these 100 elements, but in fact slowly
               progress through the entire list. */
            b = a->freelist[lno];
            vg_assert(b); // this list must be nonempty!
            a->freelist[lno] = get_next_b(b); // step one along
            break;
         }
         b_bszB = get_bszB(b);
         if (b_bszB >= req_bszB) goto obtained_block;    // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block was found; allocate a new superblock
   vg_assert(lno == N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail for the client arena; otherwise we should have
      // aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }
->sblocks_used
<= a
->sblocks_size
);
1757 if (a
->sblocks_used
== a
->sblocks_size
) {
1758 Superblock
** array
;
1759 SysRes sres
= VG_(am_mmap_anon_float_valgrind
)(sizeof(Superblock
*) *
1760 a
->sblocks_size
* 2);
1761 if (sr_isError(sres
)) {
1762 VG_(out_of_memory_NORETURN
)("arena_init", sizeof(Superblock
*) *
1763 a
->sblocks_size
* 2);
1766 array
= (Superblock
**)(AddrH
)sr_Res(sres
);
1767 for (i
= 0; i
< a
->sblocks_used
; ++i
) array
[i
] = a
->sblocks
[i
];
1769 a
->sblocks_size
*= 2;
1771 VG_(debugLog
)(1, "mallocfree",
1772 "sblock array for arena `%s' resized to %ld\n",
1773 a
->name
, a
->sblocks_size
);
1776 vg_assert(a
->sblocks_used
< a
->sblocks_size
);
1778 i
= a
->sblocks_used
;
1780 if (a
->sblocks
[i
-1] > new_sb
) {
1781 a
->sblocks
[i
] = a
->sblocks
[i
-1];
1787 a
->sblocks
[i
] = new_sb
;
1790 b
= (Block
*)&new_sb
->payload_bytes
[0];
1791 lno
= pszB_to_listNo(bszB_to_pszB(a
, new_sb
->n_payload_bytes
));
1792 mkFreeBlock ( a
, b
, new_sb
->n_payload_bytes
, lno
);
1793 if (VG_(clo_profile_heap
))
1794 set_cc(b
, "admin.free-new-sb-1");
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = get_bszB(b);
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   // A block in an unsplittable superblock can never be split.
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)
       && (NULL == new_sb || ! new_sb->unsplittable)) {
      // Yes, split the block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-1");
      b_bszB = get_bszB(b);
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
   }

   SizeT loaned = bszB_to_pszB(a, b_bszB);
   add_one_block_to_stats (a, loaned);
   a->stats__nsearches  += (ULong)stats__nsearches;
1838 sanity_check_malloc_arena(aid
);
1841 v
= get_block_payload(a
, b
);
1842 vg_assert( (((Addr
)v
) & (VG_MIN_MALLOC_SZB
-1)) == 0 );
1844 // Which size should we pass to VALGRIND_MALLOCLIKE_BLOCK ?
1845 // We have 2 possible options:
1846 // 1. The final resulting usable size.
1847 // 2. The initial (non-aligned) req_pszB.
1848 // Memcheck implements option 2 easily, as the initial requested size
1849 // is maintained in the mc_chunk data structure.
1850 // This is not as easy in the core, as there is no such structure.
1851 // (note: using the aligned req_pszB is not simpler than 2, as
1852 // requesting an aligned req_pszB might still be satisfied by returning
1853 // a (slightly) bigger block than requested if the remaining part of
1854 // of a free block is not big enough to make a free block by itself).
1855 // Implement Sol 2 can be done the following way:
1856 // After having called VALGRIND_MALLOCLIKE_BLOCK, the non accessible
1857 // redzone just after the block can be used to determine the
1858 // initial requested size.
1859 // Currently, not implemented => we use Option 1.
1861 (VALGRIND_MALLOCLIKE_BLOCK(v
,
1862 VG_(arena_malloc_usable_size
)(aid
, v
),
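   /* A minimal sketch of how option 2 might look (not implemented here;
      purely illustrative, and it assumes the hi redzone can hold a SizeT,
      which follows from Arena.rz_szB >= sizeof(void*)):

         SizeT* req_szB_slot
            = (SizeT*)((UByte*)v + VG_(arena_malloc_usable_size)(aid, v));
         INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(req_szB_slot, sizeof(SizeT)));
         *req_szB_slot = req_pszB;  // recoverable later from the redzone

      The slot name is hypothetical; the point is only that the bytes just
      past the usable payload belong to this block's redzone and so could
      carry the originally requested size. */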
   /* For debugging/testing purposes, fill the newly allocated area
      with a definite value in an attempt to shake out any
      uninitialised uses of the data (by V core / V tools, not by the
      client).  Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
      0xAA showed no differences in the regression tests on
      amd64-linux.  Note, this is disabled by default. */
   if (0 && aid != VG_AR_CLIENT)
      VG_(memset)(v, 0xAA, (SizeT)req_pszB);

   return v;
}
// If the arena already has a deferred reclaimed superblock and
// this superblock is still reclaimable, then this superblock is first
// reclaimed.
// sb then becomes the new arena deferred superblock.
// Passing NULL as sb allows reclaiming a deferred sb without setting a new
// deferred reclaim.
static
void deferred_reclaimSuperblock ( Arena* a, Superblock* sb)
{
   if (!sb) {
      if (!a->deferred_reclaimed_sb)
         // no deferred sb to reclaim now, nothing to do in the future =>
         // return directly.
         return;

      VG_(debugLog)(1, "mallocfree",
                    "deferred_reclaimSuperblock NULL "
                    "(prev %p) owner %s/%s\n",
                    a->deferred_reclaimed_sb,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );
   } else
      VG_(debugLog)(1, "mallocfree",
                    "deferred_reclaimSuperblock at %p (pszB %7ld) %s "
                    "(prev %p) owner %s/%s\n",
                    sb, sb->n_payload_bytes,
                    (sb->unsplittable ? "unsplittable" : ""),
                    a->deferred_reclaimed_sb,
                    a->clientmem ? "CLIENT" : "VALGRIND", a->name );

   if (a->deferred_reclaimed_sb && a->deferred_reclaimed_sb != sb) {
      // If we are deferring a block other than the one currently deferred,
      // then, if the currently deferred block can still be reclaimed,
      // reclaim it now.
      // Note that we might have a re-deferred reclaim of the same block
      // with a sequence: free (causing a deferred reclaim of sb)
      //                  alloc (using a piece of memory of the deferred sb)
      //                  free of the just alloc-ed block (causing a re-defer).
      UByte*      def_sb_start;
      UByte*      def_sb_end;
      Superblock* def_sb;
      Block*      b;

      def_sb       = a->deferred_reclaimed_sb;
      def_sb_start = &def_sb->payload_bytes[0];
      def_sb_end   = &def_sb->payload_bytes[def_sb->n_payload_bytes - 1];
      b = (Block *)def_sb_start;
      vg_assert (blockSane(a, b));

      // Check if the deferred_reclaimed_sb is still reclaimable.
      // If yes, we will execute the reclaim.
      if (!is_inuse_block(b)) {
         // b (at the beginning of def_sb) is not in use.
         UInt  b_listno;
         SizeT b_bszB, b_pszB;
         b_bszB = get_bszB(b);
         b_pszB = bszB_to_pszB(a, b_bszB);
         if (b + b_bszB-1 == (Block*)def_sb_end) {
            // b (not in use) covers the full superblock.
            // => def_sb is still reclaimable
            // => execute now the reclaim of this def_sb.
            b_listno = pszB_to_listNo(b_pszB);
            unlinkBlock( a, b, b_listno );
            reclaimSuperblock (a, def_sb);
            a->deferred_reclaimed_sb = NULL;
         }
      }
   }

   // sb (possibly NULL) becomes the new deferred reclaimed superblock.
   a->deferred_reclaimed_sb = sb;
}
/* b must be a free block, of size b_bszB.
   If b is followed by another free block, merge them.
   If b is preceded by another free block, merge them.
   If the merge results in the superblock being fully free,
   deferred_reclaimSuperblock the superblock. */
static void mergeWithFreeNeighbours (Arena* a, Superblock* sb,
                                     Block* b, SizeT b_bszB)
{
   UByte* sb_start;
   UByte* sb_end;
   Block* other_b;
   UWord  other_bszB;
   UWord  b_listno;

   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b,
                      pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-2");
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b,
                      pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-3");
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

   /* If the block b just merged is the only block of the superblock sb,
      then we defer the reclaim of sb. */
   if ( ((Block*)sb_start == b) && (b + b_bszB-1 == (Block*)sb_end) ) {
      deferred_reclaimSuperblock (a, sb);
   }
}
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   Block*      b;
   SizeT       b_bszB, b_pszB;
   UInt        b_listno;
   Arena*      a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(blockSane(a, b));

   b_bszB = get_bszB(b);
   b_pszB = bszB_to_pszB(a, b_bszB);
   sb     = findSb( a, b );

   a->stats__bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   if (! sb->unsplittable) {
      // Put this chunk back on a list somewhere.
      b_listno = pszB_to_listNo(b_pszB);
      mkFreeBlock( a, b, b_bszB, b_listno );
      if (VG_(clo_profile_heap))
         set_cc(b, "admin.free-1");

      /* Possibly merge b with its predecessor or successor. */
      mergeWithFreeNeighbours (a, sb, b, b_bszB);

      // Inform that ptr has been released. We give redzone size
      // 0 instead of a->rz_szB as proper accessibility is done just after.
      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));

      // We need to (re-)establish the minimum accessibility needed
      // for free list management. E.g. if block ptr has been put in a free
      // list and a neighbour block is released afterwards, the
      // "lo" and "hi" portions of the block ptr will be accessed to
      // glue the 2 blocks together.
      // We could mark the whole block as not accessible, and each time
      // transiently mark accessible the needed lo/hi parts. Not done as this
      // is quite complex, for very little expected additional bug detection,
      // so the block is never left fully unaccessible. Note that the below
      // marks the (possibly) merged block, not the block corresponding to
      // the ptr argument.

      // First mark the whole block unaccessible.
      INNER_REQUEST(VALGRIND_MAKE_MEM_NOACCESS(b, b_bszB));
      // Then mark the relevant administrative headers as defined.
      // No need to mark the heap profile portion as defined, this is not
      // used for free blocks.
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + hp_overhead_szB(),
                                              sizeof(SizeT) + sizeof(void*)));
      INNER_REQUEST(VALGRIND_MAKE_MEM_DEFINED(b + b_bszB
                                              - sizeof(SizeT) - sizeof(void*),
                                              sizeof(SizeT) + sizeof(void*)));
   } else {
      vg_assert(unsplittableBlockSane(a, sb, b));

      // Inform that ptr has been released. The redzone size value
      // is not relevant (so we give 0 instead of a->rz_szB)
      // as it is expected that the aspacemgr munmap will be used by
      // the outer to mark the whole superblock as unaccessible.
      INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(ptr, 0));

      // Reclaim immediately the unsplittable superblock sb.
      reclaimSuperblock (a, sb);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}
/* The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned to the free
   pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

        +---+                 +---+---+               +---+
        | L |----------------| H | L |---------------| H |
        +---+                 +---+---+               +---+

        |    base_p           this addr must be aligned

        <------ frag_bszB ------->    .               .
   .    <------------- base_pszB_act ----------->     .
*/
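/* Note on the arithmetic in the code below: the fragment block and the
   aligned block exactly tile the base block.  The fragment's total size is
   align_b - base_b; the aligned block's total size is
   base_p + base_pszB_act + overhead_szB_hi(a) - (UByte*)align_b; and since
   base_p == base_b + overhead_szB_lo(a), their sum is
   overhead_szB_lo(a) + base_pszB_act + overhead_szB_hi(a), i.e. the actual
   total size of base.  All names here refer to the code that follows. */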
void* VG_(arena_memalign) ( ArenaId aid, const HChar* cc,
                            SizeT req_alignB, SizeT req_pszB )
{
   SizeT  base_pszB_req, base_pszB_act, frag_bszB;
   Block  *base_b, *align_b;
   UByte  *base_p, *align_p;
   SizeT  saved_bytes_on_loan;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Check that the requested alignment has a plausible size; that is,
   // it is neither too small nor too big, and is a power of two.
   if (req_alignB < VG_MIN_MALLOC_SZB
       || req_alignB > 16 * 1024 * 1024
       || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
                  "bad alignment value %lu\n"
                  "(it is too small, too big, or not a power of two)",
                  a, req_alignB, req_pszB, req_alignB );
      VG_(core_panic)("VG_(arena_memalign)");
      /* NOTREACHED */
   }
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->stats__bytes_on_loan;
   {
      /* As we will split the block given back by VG_(arena_malloc),
         we have to (temporarily) disable unsplittable for this arena,
         as unsplittable superblocks cannot be split. */
      const SizeT save_min_unsplittable_sblock_szB
         = a->min_unsplittable_sblock_szB;
      a->min_unsplittable_sblock_szB = MAX_PSZB;
      base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
      a->min_unsplittable_sblock_szB = save_min_unsplittable_sblock_szB;
   }
   a->stats__bytes_on_loan = saved_bytes_on_loan;

   /* Give up if we couldn't allocate enough space */
   if (base_p == 0)
      return 0;

   /* base_p was marked as allocated by VALGRIND_MALLOCLIKE_BLOCK
      inside VG_(arena_malloc). We need to indicate it is free, then
      we need to mark it undefined to allow the below code to access it. */
   INNER_REQUEST(VALGRIND_FREELIKE_BLOCK(base_p, a->rz_szB));
   INNER_REQUEST(VALGRIND_MAKE_MEM_UNDEFINED(base_p, base_pszB_req));

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = get_pszB(a, base_b);

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
   if (VG_(clo_profile_heap))
      set_cc(base_b, "admin.frag-memalign-1");

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );
   if (VG_(clo_profile_heap))
      set_cc(align_b, cc);

   /* Final sanity checks. */
   vg_assert( is_inuse_block(get_payload_block(a, align_p)) );

   vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));

   a->stats__bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
   if (a->stats__bytes_on_loan > a->stats__bytes_on_loan_max) {
      a->stats__bytes_on_loan_max = a->stats__bytes_on_loan;
   }
   /* a->stats__tot_blocks, a->stats__tot_bytes, a->stats__nsearches
      are updated by the call to VG_(arena_malloc) just a few lines
      above.  So we don't need to update them here. */

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );

   INNER_REQUEST(VALGRIND_MALLOCLIKE_BLOCK(align_p,
                                           req_pszB, a->rz_szB, False));

   return align_p;
}
SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return get_pszB(a, b);
}
// Implementation of mallinfo(). There is no recent standard that defines
// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
// is as follows:
//
//     struct mallinfo {
//        int arena;     /* total space in arena            */
//        int ordblks;   /* number of ordinary blocks       */
//        int smblks;    /* number of small blocks          */
//        int hblks;     /* number of holding blocks        */
//        int hblkhd;    /* space in holding block headers  */
//        int usmblks;   /* space in small blocks in use    */
//        int fsmblks;   /* space in free small blocks      */
//        int uordblks;  /* space in ordinary blocks in use */
//        int fordblks;  /* space in free ordinary blocks   */
//        int keepcost;  /* space penalty if keep option    */
//     };
//
// The glibc documentation about mallinfo (which is somewhat outdated) can
// be found here:
// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
//
// See also http://bugs.kde.org/show_bug.cgi?id=160956.
//
// Regarding the implementation of VG_(mallinfo)(): we cannot return the
// whole struct as the library function does, because this is called by a
// client request.  So instead we use a pointer to do call by reference.
void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
{
   UWord  i, free_blocks, free_blocks_size;
   Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);

   // Traverse free list and calculate free blocks statistics.
   // This may seem slow but glibc works the same way.
   free_blocks_size = free_blocks = 0;
   for (i = 0; i < N_MALLOC_LISTS; i++) {
      Block* b = a->freelist[i];
      if (b == NULL) continue;
      while (True) {
         free_blocks++;
         free_blocks_size += (UWord)get_pszB(a, b);
         b = get_next_b(b);
         if (b == a->freelist[i]) break;
      }
   }

   // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
   // have a separate mmap allocator so set hblks & hblkhd to 0.
   mi->arena    = a->stats__bytes_mmaped;
   mi->ordblks  = free_blocks + VG_(free_queue_length);
   mi->smblks   = 0;
   mi->hblks    = 0;
   mi->hblkhd   = 0;
   mi->usmblks  = 0;
   mi->fsmblks  = 0;
   mi->uordblks = a->stats__bytes_on_loan - VG_(free_queue_volume);
   mi->fordblks = free_blocks_size + VG_(free_queue_volume);
   mi->keepcost = 0; // may want some value in here
}
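/* Minimal usage sketch (illustrative only; assumes a valid ThreadId `tid`
   is in scope, e.g. inside a client-request handler):

      struct vg_mallinfo mi;
      VG_(mallinfo)(tid, &mi);
      // mi.uordblks and mi.fordblks then give the client-arena payload
      // bytes currently in use and currently free, respectively.
*/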
SizeT VG_(arena_redzone_size) ( ArenaId aid )
{
   ensure_mm_init (VG_AR_CLIENT);
   /* ensure_mm_init will call arena_init if not yet done.
      This then ensures that the arena redzone size is properly
      initialised. */
   return arenaId_to_ArenaP(aid)->rz_szB;
}
/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/
void* VG_(arena_calloc) ( ArenaId aid, const HChar* cc,
                          SizeT nmemb, SizeT bytes_per_memb )
{
   SizeT size;
   void* p;

   size = nmemb * bytes_per_memb;
   vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
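   /* Why this guard works (worked example, 64-bit SizeT): for
      nmemb = 1UL<<60 and bytes_per_memb = 32 the product wraps to 0,
      which fails `size >= nmemb`, so the overflow is caught.  Note the
      check is a heuristic rather than a complete overflow test: with
      nmemb = bytes_per_memb = (1UL<<32) + 2 the product wraps to
      (1UL<<34) + 4, which is larger than both operands and so would
      slip through. */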
   p = VG_(arena_malloc) ( aid, cc, size );

   VG_(memset)(p, 0, size);

   return p;
}
void* VG_(arena_realloc) ( ArenaId aid, const HChar* cc,
                           void* ptr, SizeT req_pszB )
{
   Arena* a;
   SizeT  old_pszB;
   void*  p_new;
   Block* b;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   if (NULL == ptr) {
      return VG_(arena_malloc)(aid, cc, req_pszB);
   }

   if (req_pszB == 0) {
      VG_(arena_free)(aid, ptr);
      return NULL;
   }

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   vg_assert(is_inuse_block(b));
   old_pszB = get_pszB(a, b);

   if (req_pszB <= old_pszB) {
      return ptr;
   }

   p_new = VG_(arena_malloc) ( aid, cc, req_pszB );

   VG_(memcpy)(p_new, ptr, old_pszB);

   VG_(arena_free)(aid, ptr);

   return p_new;
}
void VG_(arena_realloc_shrink) ( ArenaId aid,
                                 void* ptr, SizeT req_pszB )
{
   SizeT  req_bszB, frag_bszB, b_bszB;
   SizeT  old_pszB;
   Superblock* sb;
   Arena* a;
   Block* b;

   ensure_mm_init(aid);

   a = arenaId_to_ArenaP(aid);
   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));
   vg_assert(is_inuse_block(b));

   old_pszB = get_pszB(a, b);
   req_pszB = align_req_pszB(req_pszB);
   vg_assert(old_pszB >= req_pszB);
   if (old_pszB == req_pszB)
      return;

   sb = findSb( a, b );
   if (sb->unsplittable) {
      const UByte* sb_start = &sb->payload_bytes[0];
      const UByte* sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];
      Addr  frag;

      vg_assert(unsplittableBlockSane(a, sb, b));

      frag = VG_PGROUNDUP((Addr) sb
                          + sizeof(Superblock) + pszB_to_bszB(a, req_pszB));
      frag_bszB = (Addr)sb_end - frag + 1;

      if (frag_bszB >= VKI_PAGE_SIZE) {
         SysRes sres;

         a->stats__bytes_on_loan -= old_pszB;
         b_bszB = (UByte*)frag - sb_start;
         shrinkInuseBlock(a, b, b_bszB);
         INNER_REQUEST
            (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                          old_pszB,
                                          VG_(arena_malloc_usable_size)(aid, ptr),
                                          a->rz_szB));
         /* Have the minimum admin headers needed accessibility. */
         INNER_REQUEST(mkBhdrSzAccess(a, b));
         a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);

         sb->n_payload_bytes -= frag_bszB;
         VG_(debugLog)(1, "mallocfree",
                       "shrink superblock %p to (pszB %7ld) "
                       "owner %s/%s (munmap-ing %p %7ld)\n",
                       sb, sb->n_payload_bytes,
                       a->clientmem ? "CLIENT" : "VALGRIND", a->name,
                       (void*) frag, frag_bszB);
         if (a->clientmem) {
            Bool need_discard = False;
            sres = VG_(am_munmap_client)(&need_discard,
                                         frag, frag_bszB);
            vg_assert (!need_discard);
         } else {
            sres = VG_(am_munmap_valgrind)(frag, frag_bszB);
         }
         vg_assert2(! sr_isError(sres), "shrink superblock munmap failure\n");
         a->stats__bytes_mmaped -= frag_bszB;

         vg_assert(unsplittableBlockSane(a, sb, b));
      }
   } else {
      req_bszB = pszB_to_bszB(a, req_pszB);
      b_bszB = get_bszB(b);
      frag_bszB = b_bszB - req_bszB;
      if (frag_bszB < min_useful_bszB(a))
         return;

      a->stats__bytes_on_loan -= old_pszB;
      shrinkInuseBlock(a, b, req_bszB);
      INNER_REQUEST
         (VALGRIND_RESIZEINPLACE_BLOCK(ptr,
                                       old_pszB,
                                       VG_(arena_malloc_usable_size)(aid, ptr),
                                       a->rz_szB));
      /* Have the minimum admin headers needed accessibility. */
      INNER_REQUEST(mkBhdrSzAccess(a, b));

      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                  pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      /* Mark the admin headers as accessible. */
      INNER_REQUEST(mkBhdrAccess(a, &b[req_bszB]));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-2");
      /* Possibly merge &b[req_bszB] with its free neighbours. */
      mergeWithFreeNeighbours(a, sb, &b[req_bszB], frag_bszB);

      b_bszB = get_bszB(b);
      a->stats__bytes_on_loan += bszB_to_pszB(a, b_bszB);
   }

   vg_assert (blockSane(a, b));
#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif
}
/* Inline just for the wrapper VG_(strdup) below */
__inline__ HChar* VG_(arena_strdup) ( ArenaId aid, const HChar* cc,
                                      const HChar* s )
{
   Int    i;
   Int    len;
   HChar* res;

   len = VG_(strlen)(s) + 1;
   res = VG_(arena_malloc) (aid, cc, len);

   for (i = 0; i < len; i++)
      res[i] = s[i];
   return res;
}
void* VG_(arena_perm_malloc) ( ArenaId aid, SizeT size, Int align  )
{
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   align = align - 1;
   size = (size + align) & ~align;
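   /* Worked example of the rounding above (illustrative numbers only):
      with align == 16 on entry, the previous line turns it into the mask
      0xF, and a request of size == 21 becomes (21 + 15) & ~15 == 32, i.e.
      size is rounded up to the next multiple of the requested alignment.
      This assumes the caller passes a power-of-two align. */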
   if (UNLIKELY(a->perm_malloc_current + size > a->perm_malloc_limit)) {
      // Get a superblock, but we will not insert it into the superblock list.
      // The superblock structure is not needed, so we will use the full
      // memory range of it. This superblock is however counted in the
      // mmaped statistics.
      Superblock* new_sb = newSuperblock (a, size);
      a->perm_malloc_limit
         = (Addr)&new_sb->payload_bytes[new_sb->n_payload_bytes - 1];

      // We do not mind starting to allocate from the beginning of the
      // superblock as afterwards, we "lose" it as a superblock.
      a->perm_malloc_current = (Addr)new_sb;
   }

   a->stats__perm_blocks += 1;
   a->stats__perm_bytes_on_loan += size;
   add_one_block_to_stats (a, size);

   a->perm_malloc_current += size;
   return (void*)(a->perm_malloc_current - size);
}
/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

// This function never returns NULL.
void* VG_(malloc) ( const HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_CORE, cc, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_CORE, ptr );
}
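/* Typical core-side usage of the two wrappers above (illustrative only;
   the cost-center string is a placeholder):

      HChar* buf = VG_(malloc)("example.cc.1", 128);
      VG_(memset)(buf, 0, 128);
      VG_(free)(buf);
*/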
void* VG_(calloc) ( const HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_CORE, cc, nmemb, bytes_per_memb );
}

void* VG_(realloc) ( const HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_CORE, cc, ptr, size );
}

void VG_(realloc_shrink) ( void* ptr, SizeT size )
{
   VG_(arena_realloc_shrink) ( VG_AR_CORE, ptr, size );
}

HChar* VG_(strdup) ( const HChar* cc, const HChar* s )
{
   return VG_(arena_strdup) ( VG_AR_CORE, cc, s );
}

void* VG_(perm_malloc) ( SizeT size, Int align )
{
   return VG_(arena_perm_malloc) ( VG_AR_CORE, size, align );
}


/*--------------------------------------------------------------------*/
/*--- end                                                           ---*/
/*--------------------------------------------------------------------*/