1 /*--------------------------------------------------------------------*/
2 /*--- A pool (memory) allocator that avoids duplicated copies. ---*/
3 /*--- m_deduppoolalloc.c ---*/
4 /*--------------------------------------------------------------------*/
6 This file is part of Valgrind, a dynamic binary instrumentation
9 Copyright (C) 2014-2017 Philippe Waroquiers philippe.waroquiers@skynet.be
11 This program is free software; you can redistribute it and/or
12 modify it under the terms of the GNU General Public License as
13 published by the Free Software Foundation; either version 2 of the
14 License, or (at your option) any later version.
16 This program is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, see <http://www.gnu.org/licenses/>.
24 The GNU General Public License is contained in the file COPYING.
27 #include "pub_core_basics.h"
28 #include "pub_core_libcbase.h"
29 #include "pub_core_libcprint.h"
30 #include "pub_core_libcassert.h"
31 #include "pub_core_xarray.h"
32 #include "pub_core_deduppoolalloc.h" /* self */
33 #include "pub_core_hashtable.h"
34 #include "pub_core_poolalloc.h"
35 #include "pub_core_options.h"
36 #include "pub_core_mallocfree.h"
37 #include "pub_core_debuglog.h"
39 struct _DedupPoolAlloc
{
40 SizeT poolSzB
; /* Minimum size of a pool. */
41 SizeT fixedSzb
; /* If using VG_(allocFixedEltDedupPA), size of elements */
42 Bool strPA
; /* True if this is a string dedup pool */
44 Alloc_Fn_t alloc_fn
; /* pool allocator */
45 const HChar
* cc
; /* pool allocator's cost centre */
46 Free_Fn_t free_fn
; /* pool allocator's deallocation function */
47 /* XArray of void* (pointers to pools). The pools themselves.
48 Each element is a pointer to a block of size at least PoolSzB bytes.
49 The last block might be smaller due to a call to shrink_block. */
52 /* hash table of pool elements, used to dedup.
53 If NULL, it means the DedupPoolAlloc is frozen. */
54 VgHashTable
*ht_elements
;
56 /* Hash table nodes of pool_elements are allocated with a pool, to
57 decrease memory overhead during insertion in the DedupPoolAlloc. */
58 PoolAlloc
*ht_node_pa
;
60 UChar
*curpool
; /* last allocated pool. */
61 UChar
*curpool_free
; /* Pos in current pool to allocate next elt.
62 always aligned on eltAlign. */
63 UChar
*curpool_limit
; /* Last pos in current pool. */
64 /* Note that for a fixed size pool, we only have a single pool to allow
65 simple/fast indexing. This single pool is grown, which might change
66 the address of the already allocated elements. */
68 /* Total nr of alloc calls, resulting in (we hope) a lot less
69 real (dedup) elements. */
75 struct _ht_node
*next
; // Read/Write by hashtable (pub_tool_hashtable.h)
76 UWord key
; // Read by hashtable (pub_tool_hashtable.h)
77 SizeT eltSzBorStrNr
; // for a normal pool, elt size
78 // for a string pool, the unique str number
83 DedupPoolAlloc
* VG_(newDedupPA
) ( SizeT poolSzB
,
90 vg_assert(poolSzB
>= eltAlign
);
91 vg_assert(poolSzB
>= 100); /* let's say */
92 vg_assert(poolSzB
>= 10*eltAlign
); /* let's say */
96 ddpa
= alloc_fn(cc
, sizeof(*ddpa
));
97 VG_(memset
)(ddpa
, 0, sizeof(*ddpa
));
98 ddpa
->poolSzB
= poolSzB
;
101 ddpa
->eltAlign
= eltAlign
;
102 ddpa
->alloc_fn
= alloc_fn
;
104 ddpa
->free_fn
= free_fn
;
105 ddpa
->pools
= VG_(newXA
)( alloc_fn
, cc
, free_fn
, sizeof(void*) );
107 ddpa
->ht_elements
= VG_(HT_construct
) (cc
);
108 ddpa
->ht_node_pa
= VG_(newPA
) ( sizeof(ht_node
),
113 ddpa
->curpool
= NULL
;
114 ddpa
->curpool_limit
= NULL
;
115 ddpa
->curpool_free
= NULL
;
120 void VG_(deleteDedupPA
) ( DedupPoolAlloc
* ddpa
)
123 if (ddpa
->ht_elements
)
124 // Free data structures used for insertion.
125 VG_(freezeDedupPA
) (ddpa
, NULL
);
126 for (i
= 0; i
< VG_(sizeXA
) (ddpa
->pools
); i
++)
127 ddpa
->free_fn (*(UWord
**)VG_(indexXA
) ( ddpa
->pools
, i
));
128 VG_(deleteXA
) (ddpa
->pools
);
129 ddpa
->free_fn (ddpa
);
133 UChar
* ddpa_align ( DedupPoolAlloc
* ddpa
, UChar
*c
)
135 return (UChar
*)VG_ROUNDUP(c
, ddpa
->eltAlign
);
138 /* Allocate a new pool or grow the (only) pool for a fixed size ddpa. */
139 __attribute__((noinline
))
140 static void ddpa_add_new_pool_or_grow ( DedupPoolAlloc
* ddpa
)
144 if (ddpa
->fixedSzb
> 0 && ddpa
->curpool
!= NULL
) {
145 // Grow (* 2) the current (fixed elt) pool
146 UChar
*curpool_align
= ddpa_align(ddpa
, ddpa
->curpool
);
147 SizeT curpool_used
= ddpa
->curpool_free
- curpool_align
;
148 SizeT curpool_size
= ddpa
->curpool_limit
- ddpa
->curpool
+ 1;
149 UChar
*newpool
= ddpa
->alloc_fn (ddpa
->cc
, 2 * curpool_size
);
150 UChar
*newpool_free
= ddpa_align (ddpa
, newpool
);
151 UChar
*newpool_limit
= newpool
+ 2 * curpool_size
- 1;
152 Word reloc_offset
= (Addr
)newpool_free
- (Addr
)curpool_align
;
155 VG_(memcpy
) (newpool_free
, curpool_align
, curpool_used
);
156 /* We have reallocated the (only) pool. We need to relocate the pointers
157 in the hash table nodes. */
158 VG_(HT_ResetIter
) (ddpa
->ht_elements
);
159 while ((n
= VG_(HT_Next
) (ddpa
->ht_elements
))) {
160 n
->elt
= (void*)((Addr
)n
->elt
+ reloc_offset
);
162 newpool_free
+= curpool_used
;
164 VG_(dropHeadXA
) (ddpa
->pools
, 1);
165 ddpa
->free_fn (ddpa
->curpool
);
166 ddpa
->curpool
= newpool
;
167 ddpa
->curpool_free
= newpool_free
;
168 ddpa
->curpool_limit
= newpool_limit
;
169 VG_(addToXA
)( ddpa
->pools
, &ddpa
->curpool
);
171 /* Allocate a new pool, or allocate the first/only pool for a
173 ddpa
->curpool
= ddpa
->alloc_fn( ddpa
->cc
, ddpa
->poolSzB
);
174 ddpa
->curpool_limit
= ddpa
->curpool
+ ddpa
->poolSzB
- 1;
175 ddpa
->curpool_free
= ddpa_align (ddpa
, ddpa
->curpool
);
176 /* add to our collection of pools */
177 VG_(addToXA
)( ddpa
->pools
, &ddpa
->curpool
);
181 /* Compare function for 'gen' hash table. No need to compare the key
182 in this function, as the hash table already does it for us,
183 and that in any case, if the data is equal, the keys must also be
185 static Word
cmp_pool_elt (const void* node1
, const void* node2
)
187 const ht_node
* hnode1
= node1
;
188 const ht_node
* hnode2
= node2
;
190 /* As this function is called by hashtable, that has already checked
191 for key equality, it is likely that it is the 'good' element.
192 So, we handle the equal case first. */
193 if (hnode1
->eltSzBorStrNr
== hnode2
->eltSzBorStrNr
)
194 return VG_(memcmp
) (hnode1
->elt
, hnode2
->elt
, hnode1
->eltSzBorStrNr
);
195 else if (hnode1
->eltSzBorStrNr
< hnode2
->eltSzBorStrNr
)
201 /* String compare function for 'gen' hash table.
202 Similarly to cmp_pool_elt, no need to compare the key. */
203 static Word
cmp_pool_str (const void* node1
, const void* node2
)
205 const ht_node
* hnode1
= node1
;
206 const ht_node
* hnode2
= node2
;
208 return VG_(strcmp
)(hnode1
->elt
, hnode2
->elt
);
211 /* Print some stats. */
212 static void print_stats (DedupPoolAlloc
*ddpa
)
214 VG_(message
)(Vg_DebugMsg
,
215 "dedupPA:%s %ld allocs (%u uniq)"
216 " %ld pools (%ld bytes free in last pool)\n",
218 (long int) ddpa
->nr_alloc_calls
,
219 VG_(HT_count_nodes
)(ddpa
->ht_elements
),
220 VG_(sizeXA
)(ddpa
->pools
),
222 (long int) (ddpa
->curpool_limit
- ddpa
->curpool_free
+ 1) : 0);
224 VG_(HT_print_stats
) (ddpa
->ht_elements
, cmp_pool_str
);
226 VG_(HT_print_stats
) (ddpa
->ht_elements
, cmp_pool_elt
);
/* Dummy free, as the ht elements are allocated in a pool, and
   we will destroy the pool in one single operation. */
static void htelem_dummyfree(void* ht_elem)
{
}
235 void VG_(freezeDedupPA
) (DedupPoolAlloc
*ddpa
,
236 void (*shrink_block
)(void*, SizeT
))
239 && (VG_(clo_verbosity
) > 2 || VG_(debugLog_getLevel
) () >= 2)) {
242 vg_assert (!ddpa
->fixedSzb
|| VG_(sizeXA
) (ddpa
->pools
) == 1);
243 if (shrink_block
&& ddpa
->curpool_limit
> ddpa
->curpool_free
)
244 (*shrink_block
)(ddpa
->curpool
, ddpa
->curpool_free
- ddpa
->curpool
);
245 VG_(HT_destruct
) ( ddpa
->ht_elements
, htelem_dummyfree
);
246 ddpa
->ht_elements
= NULL
;
247 VG_(deletePA
) (ddpa
->ht_node_pa
);
248 ddpa
->ht_node_pa
= NULL
;
252 // hash function used by gawk and SDBM.
253 static UInt
sdbm_hash (const UChar
* buf
, UInt len
)
259 for (i
= 0; i
< len
; i
++)
260 h
= *buf
++ + (h
<<6) + (h
<<16) - h
;
264 static ht_node
* allocEltDedupPA (DedupPoolAlloc
*ddpa
, SizeT eltSzB
,
271 vg_assert(ddpa
->ht_elements
);
273 ddpa
->nr_alloc_calls
++;
275 ht_elt
.key
= sdbm_hash (elt
, eltSzB
);
280 ht_ins
= VG_(HT_gen_lookup
) (ddpa
->ht_elements
, &ht_elt
, cmp_pool_str
);
282 ht_elt
.eltSzBorStrNr
= eltSzB
;
283 ht_ins
= VG_(HT_gen_lookup
) (ddpa
->ht_elements
, &ht_elt
, cmp_pool_elt
);
288 /* Not found -> we need to allocate a new element from the pool
289 and insert it in the hash table of inserted elements. */
291 // Add a new pool or grow pool if not enough space in the current pool
292 if (eltSzB
+ ddpa
->eltAlign
> ddpa
->poolSzB
) {
293 // Element (+eltAlign for worst case) bigger than the pool size
294 // => allocate a specific pool just for this element
295 UChar
*newpool
= ddpa
->alloc_fn (ddpa
->cc
, eltSzB
+ ddpa
->eltAlign
);
296 /* add to our collection of pools */
297 VG_(addToXA
)( ddpa
->pools
, &newpool
);
298 elt_ins
= ddpa_align (ddpa
, newpool
);
300 if (UNLIKELY(ddpa
->curpool_free
== NULL
301 || ddpa
->curpool_free
+ eltSzB
- 1 > ddpa
->curpool_limit
)) {
302 ddpa_add_new_pool_or_grow (ddpa
);
304 elt_ins
= ddpa
->curpool_free
;
305 ddpa
->curpool_free
= ddpa_align(ddpa
, ddpa
->curpool_free
+ eltSzB
);
309 VG_(memcpy
)(elt_ins
, elt
, eltSzB
);
310 ht_ins
= VG_(allocEltPA
) (ddpa
->ht_node_pa
);
311 ht_ins
->key
= ht_elt
.key
;
313 ht_ins
->eltSzBorStrNr
= VG_(HT_count_nodes
)(ddpa
->ht_elements
) + 1;
315 ht_ins
->eltSzBorStrNr
= eltSzB
;
316 ht_ins
->elt
= elt_ins
;
317 VG_(HT_add_node
)(ddpa
->ht_elements
, ht_ins
);
321 const void* VG_(allocEltDedupPA
) (DedupPoolAlloc
*ddpa
, SizeT eltSzB
,
324 return allocEltDedupPA(ddpa
, eltSzB
, elt
)->elt
;
327 UInt
VG_(allocStrDedupPA
) (DedupPoolAlloc
*ddpa
,
332 // First insertion in this ddpa
333 vg_assert (ddpa
->nr_alloc_calls
== 0);
334 vg_assert (ddpa
->fixedSzb
== 0);
338 const UInt nr_str
= VG_(HT_count_nodes
)(ddpa
->ht_elements
);
339 const ht_node
* ht_ins
= allocEltDedupPA(ddpa
, VG_(strlen
)(str
)+1, str
);
341 *newStr
= nr_str
< VG_(HT_count_nodes
)(ddpa
->ht_elements
);
342 return ht_ins
->eltSzBorStrNr
;
346 UInt
elt2nr (DedupPoolAlloc
*ddpa
, const void *dedup_elt
)
348 vg_assert (dedup_elt
>= (const void *)ddpa
->curpool
349 && dedup_elt
< (const void *)ddpa
->curpool_free
);
350 return 1 + ((const UChar
*)dedup_elt
- (const UChar
*)ddpa
->curpool
)
351 / VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);
354 UInt
VG_(allocFixedEltDedupPA
) (DedupPoolAlloc
*ddpa
,
355 SizeT eltSzB
, const void *elt
)
357 if (ddpa
->fixedSzb
== 0) {
358 // First insertion in this ddpa
359 vg_assert (!ddpa
->strPA
);
360 vg_assert (ddpa
->nr_alloc_calls
== 0);
361 vg_assert (eltSzB
> 0);
362 ddpa
->fixedSzb
= eltSzB
;
364 vg_assert (ddpa
->fixedSzb
== eltSzB
);
365 const void *dedup_elt
= VG_(allocEltDedupPA
) (ddpa
, eltSzB
, elt
);
366 return elt2nr (ddpa
, dedup_elt
);
369 void* VG_(indexEltNumber
) (DedupPoolAlloc
*ddpa
,
374 dedup_elt
= ddpa
->curpool
375 + (eltNr
- 1) * VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);
377 vg_assert ((UChar
*)dedup_elt
>= ddpa
->curpool
378 && (UChar
*)dedup_elt
< ddpa
->curpool_free
);
383 UInt
VG_(sizeDedupPA
) (DedupPoolAlloc
*ddpa
)
385 if (ddpa
->curpool
== NULL
)
388 vg_assert (ddpa
->fixedSzb
);
389 return (ddpa
->curpool_free
- ddpa_align(ddpa
, ddpa
->curpool
))
390 / VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);