/*--------------------------------------------------------------------*/
/*--- A pool (memory) allocator that avoids duplicated copies.     ---*/
/*---                                           m_deduppoolalloc.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2014-2017 Philippe Waroquiers philippe.waroquiers@skynet.be

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
29 #include "pub_core_basics.h"
30 #include "pub_core_libcbase.h"
31 #include "pub_core_libcprint.h"
32 #include "pub_core_libcassert.h"
33 #include "pub_core_xarray.h"
34 #include "pub_core_deduppoolalloc.h" /* self */
35 #include "pub_core_hashtable.h"
36 #include "pub_core_poolalloc.h"
37 #include "pub_core_options.h"
38 #include "pub_core_mallocfree.h"
39 #include "pub_core_debuglog.h"
41 struct _DedupPoolAlloc
{
42 SizeT poolSzB
; /* Minimum size of a pool. */
43 SizeT fixedSzb
; /* If using VG_(allocFixedEltDedupPA), size of elements */
44 Bool strPA
; /* True if this is a string dedup pool */
46 Alloc_Fn_t alloc_fn
; /* pool allocator */
47 const HChar
* cc
; /* pool allocator's cost centre */
48 Free_Fn_t free_fn
; /* pool allocator's deallocation function */
49 /* XArray of void* (pointers to pools). The pools themselves.
50 Each element is a pointer to a block of size at least PoolSzB bytes.
51 The last block might be smaller due to a call to shrink_block. */
54 /* hash table of pool elements, used to dedup.
55 If NULL, it means the DedupPoolAlloc is frozen. */
56 VgHashTable
*ht_elements
;
58 /* Hash table nodes of pool_elements are allocated with a pool, to
59 decrease memory overhead during insertion in the DedupPoolAlloc. */
60 PoolAlloc
*ht_node_pa
;
62 UChar
*curpool
; /* last allocated pool. */
63 UChar
*curpool_free
; /* Pos in current pool to allocate next elt.
64 always aligned on eltAlign. */
65 UChar
*curpool_limit
; /* Last pos in current pool. */
66 /* Note that for a fixed size pool, we only have a single pool to allow
67 simple/fast indexing. This single pool is grown, which might change
68 the address of the already allocated elements. */
70 /* Total nr of alloc calls, resulting in (we hope) a lot less
71 real (dedup) elements. */
77 struct _ht_node
*next
; // Read/Write by hashtable (pub_tool_hashtable.h)
78 UWord key
; // Read by hashtable (pub_tool_hashtable.h)
79 SizeT eltSzBorStrNr
; // for a normal pool, elt size
80 // for a string pool, the unique str number
85 DedupPoolAlloc
* VG_(newDedupPA
) ( SizeT poolSzB
,
92 vg_assert(poolSzB
>= eltAlign
);
93 vg_assert(poolSzB
>= 100); /* let's say */
94 vg_assert(poolSzB
>= 10*eltAlign
); /* let's say */
98 ddpa
= alloc_fn(cc
, sizeof(*ddpa
));
99 VG_(memset
)(ddpa
, 0, sizeof(*ddpa
));
100 ddpa
->poolSzB
= poolSzB
;
103 ddpa
->eltAlign
= eltAlign
;
104 ddpa
->alloc_fn
= alloc_fn
;
106 ddpa
->free_fn
= free_fn
;
107 ddpa
->pools
= VG_(newXA
)( alloc_fn
, cc
, free_fn
, sizeof(void*) );
109 ddpa
->ht_elements
= VG_(HT_construct
) (cc
);
110 ddpa
->ht_node_pa
= VG_(newPA
) ( sizeof(ht_node
),
115 ddpa
->curpool
= NULL
;
116 ddpa
->curpool_limit
= NULL
;
117 ddpa
->curpool_free
= NULL
;
122 void VG_(deleteDedupPA
) ( DedupPoolAlloc
* ddpa
)
125 if (ddpa
->ht_elements
)
126 // Free data structures used for insertion.
127 VG_(freezeDedupPA
) (ddpa
, NULL
);
128 for (i
= 0; i
< VG_(sizeXA
) (ddpa
->pools
); i
++)
129 ddpa
->free_fn (*(UWord
**)VG_(indexXA
) ( ddpa
->pools
, i
));
130 VG_(deleteXA
) (ddpa
->pools
);
131 ddpa
->free_fn (ddpa
);
135 UChar
* ddpa_align ( DedupPoolAlloc
* ddpa
, UChar
*c
)
137 return (UChar
*)VG_ROUNDUP(c
, ddpa
->eltAlign
);
140 /* Allocate a new pool or grow the (only) pool for a fixed size ddpa. */
141 __attribute__((noinline
))
142 static void ddpa_add_new_pool_or_grow ( DedupPoolAlloc
* ddpa
)
146 if (ddpa
->fixedSzb
> 0 && ddpa
->curpool
!= NULL
) {
147 // Grow (* 2) the current (fixed elt) pool
148 UChar
*curpool_align
= ddpa_align(ddpa
, ddpa
->curpool
);
149 SizeT curpool_used
= ddpa
->curpool_free
- curpool_align
;
150 SizeT curpool_size
= ddpa
->curpool_limit
- ddpa
->curpool
+ 1;
151 UChar
*newpool
= ddpa
->alloc_fn (ddpa
->cc
, 2 * curpool_size
);
152 UChar
*newpool_free
= ddpa_align (ddpa
, newpool
);
153 UChar
*newpool_limit
= newpool
+ 2 * curpool_size
- 1;
154 Word reloc_offset
= (Addr
)newpool_free
- (Addr
)curpool_align
;
157 VG_(memcpy
) (newpool_free
, curpool_align
, curpool_used
);
158 /* We have reallocated the (only) pool. We need to relocate the pointers
159 in the hash table nodes. */
160 VG_(HT_ResetIter
) (ddpa
->ht_elements
);
161 while ((n
= VG_(HT_Next
) (ddpa
->ht_elements
))) {
162 n
->elt
= (void*)((Addr
)n
->elt
+ reloc_offset
);
164 newpool_free
+= curpool_used
;
166 VG_(dropHeadXA
) (ddpa
->pools
, 1);
167 ddpa
->free_fn (ddpa
->curpool
);
168 ddpa
->curpool
= newpool
;
169 ddpa
->curpool_free
= newpool_free
;
170 ddpa
->curpool_limit
= newpool_limit
;
171 VG_(addToXA
)( ddpa
->pools
, &ddpa
->curpool
);
173 /* Allocate a new pool, or allocate the first/only pool for a
175 ddpa
->curpool
= ddpa
->alloc_fn( ddpa
->cc
, ddpa
->poolSzB
);
176 ddpa
->curpool_limit
= ddpa
->curpool
+ ddpa
->poolSzB
- 1;
177 ddpa
->curpool_free
= ddpa_align (ddpa
, ddpa
->curpool
);
178 /* add to our collection of pools */
179 VG_(addToXA
)( ddpa
->pools
, &ddpa
->curpool
);
183 /* Compare function for 'gen' hash table. No need to compare the key
184 in this function, as the hash table already does it for us,
185 and that in any case, if the data is equal, the keys must also be
187 static Word
cmp_pool_elt (const void* node1
, const void* node2
)
189 const ht_node
* hnode1
= node1
;
190 const ht_node
* hnode2
= node2
;
192 /* As this function is called by hashtable, that has already checked
193 for key equality, it is likely that it is the 'good' element.
194 So, we handle the equal case first. */
195 if (hnode1
->eltSzBorStrNr
== hnode2
->eltSzBorStrNr
)
196 return VG_(memcmp
) (hnode1
->elt
, hnode2
->elt
, hnode1
->eltSzBorStrNr
);
197 else if (hnode1
->eltSzBorStrNr
< hnode2
->eltSzBorStrNr
)
203 /* String compare function for 'gen' hash table.
204 Similarly to cmp_pool_elt, no need to compare the key. */
205 static Word
cmp_pool_str (const void* node1
, const void* node2
)
207 const ht_node
* hnode1
= node1
;
208 const ht_node
* hnode2
= node2
;
210 return VG_(strcmp
)(hnode1
->elt
, hnode2
->elt
);
213 /* Print some stats. */
214 static void print_stats (DedupPoolAlloc
*ddpa
)
216 VG_(message
)(Vg_DebugMsg
,
217 "dedupPA:%s %ld allocs (%u uniq)"
218 " %ld pools (%ld bytes free in last pool)\n",
220 (long int) ddpa
->nr_alloc_calls
,
221 VG_(HT_count_nodes
)(ddpa
->ht_elements
),
222 VG_(sizeXA
)(ddpa
->pools
),
224 (long int) (ddpa
->curpool_limit
- ddpa
->curpool_free
+ 1) : 0);
226 VG_(HT_print_stats
) (ddpa
->ht_elements
, cmp_pool_str
);
228 VG_(HT_print_stats
) (ddpa
->ht_elements
, cmp_pool_elt
);
/* Dummy free, as the ht elements are allocated in a pool, and
   we will destroy the pool in one single operation. */
static void htelem_dummyfree(void* ht_elem)
{
}
237 void VG_(freezeDedupPA
) (DedupPoolAlloc
*ddpa
,
238 void (*shrink_block
)(void*, SizeT
))
241 && (VG_(clo_verbosity
) > 2 || VG_(debugLog_getLevel
) () >= 2)) {
244 vg_assert (!ddpa
->fixedSzb
|| VG_(sizeXA
) (ddpa
->pools
) == 1);
245 if (shrink_block
&& ddpa
->curpool_limit
> ddpa
->curpool_free
)
246 (*shrink_block
)(ddpa
->curpool
, ddpa
->curpool_free
- ddpa
->curpool
);
247 VG_(HT_destruct
) ( ddpa
->ht_elements
, htelem_dummyfree
);
248 ddpa
->ht_elements
= NULL
;
249 VG_(deletePA
) (ddpa
->ht_node_pa
);
250 ddpa
->ht_node_pa
= NULL
;
254 // hash function used by gawk and SDBM.
255 static UInt
sdbm_hash (const UChar
* buf
, UInt len
)
261 for (i
= 0; i
< len
; i
++)
262 h
= *buf
++ + (h
<<6) + (h
<<16) - h
;
266 static ht_node
* allocEltDedupPA (DedupPoolAlloc
*ddpa
, SizeT eltSzB
,
273 vg_assert(ddpa
->ht_elements
);
275 ddpa
->nr_alloc_calls
++;
277 ht_elt
.key
= sdbm_hash (elt
, eltSzB
);
282 ht_ins
= VG_(HT_gen_lookup
) (ddpa
->ht_elements
, &ht_elt
, cmp_pool_str
);
284 ht_elt
.eltSzBorStrNr
= eltSzB
;
285 ht_ins
= VG_(HT_gen_lookup
) (ddpa
->ht_elements
, &ht_elt
, cmp_pool_elt
);
290 /* Not found -> we need to allocate a new element from the pool
291 and insert it in the hash table of inserted elements. */
293 // Add a new pool or grow pool if not enough space in the current pool
294 if (eltSzB
+ ddpa
->eltAlign
> ddpa
->poolSzB
) {
295 // Element (+eltAlign for worst case) bigger than the pool size
296 // => allocate a specific pool just for this element
297 UChar
*newpool
= ddpa
->alloc_fn (ddpa
->cc
, eltSzB
+ ddpa
->eltAlign
);
298 /* add to our collection of pools */
299 VG_(addToXA
)( ddpa
->pools
, &newpool
);
300 elt_ins
= ddpa_align (ddpa
, newpool
);
302 if (UNLIKELY(ddpa
->curpool_free
== NULL
303 || ddpa
->curpool_free
+ eltSzB
- 1 > ddpa
->curpool_limit
)) {
304 ddpa_add_new_pool_or_grow (ddpa
);
306 elt_ins
= ddpa
->curpool_free
;
307 ddpa
->curpool_free
= ddpa_align(ddpa
, ddpa
->curpool_free
+ eltSzB
);
311 VG_(memcpy
)(elt_ins
, elt
, eltSzB
);
312 ht_ins
= VG_(allocEltPA
) (ddpa
->ht_node_pa
);
313 ht_ins
->key
= ht_elt
.key
;
315 ht_ins
->eltSzBorStrNr
= VG_(HT_count_nodes
)(ddpa
->ht_elements
) + 1;
317 ht_ins
->eltSzBorStrNr
= eltSzB
;
318 ht_ins
->elt
= elt_ins
;
319 VG_(HT_add_node
)(ddpa
->ht_elements
, ht_ins
);
323 const void* VG_(allocEltDedupPA
) (DedupPoolAlloc
*ddpa
, SizeT eltSzB
,
326 return allocEltDedupPA(ddpa
, eltSzB
, elt
)->elt
;
329 UInt
VG_(allocStrDedupPA
) (DedupPoolAlloc
*ddpa
,
334 // First insertion in this ddpa
335 vg_assert (ddpa
->nr_alloc_calls
== 0);
336 vg_assert (ddpa
->fixedSzb
== 0);
340 const UInt nr_str
= VG_(HT_count_nodes
)(ddpa
->ht_elements
);
341 const ht_node
* ht_ins
= allocEltDedupPA(ddpa
, VG_(strlen
)(str
)+1, str
);
343 *newStr
= nr_str
< VG_(HT_count_nodes
)(ddpa
->ht_elements
);
344 return ht_ins
->eltSzBorStrNr
;
348 UInt
elt2nr (DedupPoolAlloc
*ddpa
, const void *dedup_elt
)
350 vg_assert (dedup_elt
>= (const void *)ddpa
->curpool
351 && dedup_elt
< (const void *)ddpa
->curpool_free
);
352 return 1 + ((const UChar
*)dedup_elt
- (const UChar
*)ddpa
->curpool
)
353 / VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);
356 UInt
VG_(allocFixedEltDedupPA
) (DedupPoolAlloc
*ddpa
,
357 SizeT eltSzB
, const void *elt
)
359 if (ddpa
->fixedSzb
== 0) {
360 // First insertion in this ddpa
361 vg_assert (!ddpa
->strPA
);
362 vg_assert (ddpa
->nr_alloc_calls
== 0);
363 vg_assert (eltSzB
> 0);
364 ddpa
->fixedSzb
= eltSzB
;
366 vg_assert (ddpa
->fixedSzb
== eltSzB
);
367 const void *dedup_elt
= VG_(allocEltDedupPA
) (ddpa
, eltSzB
, elt
);
368 return elt2nr (ddpa
, dedup_elt
);
371 void* VG_(indexEltNumber
) (DedupPoolAlloc
*ddpa
,
376 dedup_elt
= ddpa
->curpool
377 + (eltNr
- 1) * VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);
379 vg_assert ((UChar
*)dedup_elt
>= ddpa
->curpool
380 && (UChar
*)dedup_elt
< ddpa
->curpool_free
);
385 UInt
VG_(sizeDedupPA
) (DedupPoolAlloc
*ddpa
)
387 if (ddpa
->curpool
== NULL
)
390 vg_assert (ddpa
->fixedSzb
);
391 return (ddpa
->curpool_free
- ddpa_align(ddpa
, ddpa
->curpool
))
392 / VG_ROUNDUP(ddpa
->fixedSzb
, ddpa
->eltAlign
);