/*--------------------------------------------------------------------*/
/*--- A pool (memory) allocator that avoids duplicated copies.    ---*/
/*---                                           m_deduppoolalloc.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2014-2014 Philippe Waroquiers philippe.waroquiers@skynet.be

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcassert.h"
#include "pub_core_xarray.h"
#include "pub_core_deduppoolalloc.h" /* self */
#include "pub_core_hashtable.h"
#include "pub_core_poolalloc.h"
#include "pub_core_options.h"
#include "pub_core_mallocfree.h"
#include "pub_core_debuglog.h"

struct _DedupPoolAlloc {
   SizeT  poolSzB;  /* Minimum size of a pool. */
   SizeT  fixedSzb; /* If using VG_(allocFixedEltDedupPA), size of elements */
   SizeT  eltAlign;
   void*  (*alloc_fn)(const HChar*, SizeT); /* pool allocator */
   const HChar* cc; /* pool allocator's cost centre */
   void   (*free_fn)(void*); /* pool allocator's deallocation function */
   /* XArray of void* (pointers to pools).  The pools themselves.
      Each element is a pointer to a block of size at least PoolSzB bytes.
      The last block might be smaller due to a call to shrink_block. */
   XArray *pools;

   /* hash table of pool elements, used to dedup.
      If NULL, it means the DedupPoolAlloc is frozen. */
   VgHashTable *ht_elements;

   /* Hash table nodes of pool_elements are allocated with a pool, to
      decrease memory overhead during insertion in the DedupPoolAlloc. */
   PoolAlloc *ht_node_pa;

   UChar *curpool;       /* last allocated pool. */
   UChar *curpool_free;  /* Pos in current pool to allocate next elt.
                            Always aligned on eltAlign. */
   UChar *curpool_limit; /* Last pos in current pool. */
   /* Note that for a fixed size pool, we only have a single pool to allow
      simple/fast indexing. This single pool is grown, which might change
      the address of the already allocated elements. */

   /* Total nr of alloc calls, resulting in (we hope) a lot less
      real (dedup) elements. */
   ULong nr_alloc_calls;
};

typedef
   struct _ht_node {
      struct _ht_node *next; // Read/Write by hashtable (pub_tool_hashtable.h)
      UWord   key;           // Read by hashtable (pub_tool_hashtable.h)
      SizeT   eltSzB;
      const void *elt;
   }
   ht_node;

DedupPoolAlloc* VG_(newDedupPA) ( SizeT  poolSzB,
                                  SizeT  eltAlign,
                                  void*  (*alloc_fn)(const HChar*, SizeT),
                                  const  HChar* cc,
                                  void   (*free_fn)(void*) )
{
   DedupPoolAlloc* ddpa;
   vg_assert(poolSzB >= eltAlign);
   vg_assert(poolSzB >= 100); /* let's say */
   vg_assert(poolSzB >= 10*eltAlign); /* let's say */
   vg_assert(alloc_fn);
   vg_assert(cc);
   vg_assert(free_fn);
   ddpa = alloc_fn(cc, sizeof(*ddpa));
   VG_(memset)(ddpa, 0, sizeof(*ddpa));
   ddpa->poolSzB  = poolSzB;
   ddpa->fixedSzb = 0;
   ddpa->eltAlign = eltAlign;
   ddpa->alloc_fn = alloc_fn;
   ddpa->cc       = cc;
   ddpa->free_fn  = free_fn;
   ddpa->pools    = VG_(newXA)( alloc_fn, cc, free_fn, sizeof(void*) );

   ddpa->ht_elements = VG_(HT_construct) (cc);
   ddpa->ht_node_pa  = VG_(newPA) ( sizeof(ht_node),
                                    1000,
                                    alloc_fn,
                                    cc,
                                    free_fn);
   ddpa->curpool = NULL;
   ddpa->curpool_limit = NULL;
   ddpa->curpool_free = NULL;

   return ddpa;
}
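
/* A minimal creation sketch (illustrative values only; "mytool.dedup" is a
   made-up cost centre, not one used by Valgrind):

      DedupPoolAlloc* dp = VG_(newDedupPA)(64*1024,     // poolSzB
                                           1,           // eltAlign
                                           VG_(malloc), "mytool.dedup",
                                           VG_(free));

   VG_(malloc)/VG_(free) are used here only as an example of an
   allocation/deallocation pair with compatible signatures; tools typically
   pass their own wrappers. */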

void VG_(deleteDedupPA) ( DedupPoolAlloc* ddpa)
{
   Word i;
   if (ddpa->ht_elements)
      // Free data structures used for insertion.
      VG_(freezeDedupPA) (ddpa, NULL);
   for (i = 0; i < VG_(sizeXA) (ddpa->pools); i++)
      ddpa->free_fn (*(UWord **)VG_(indexXA) ( ddpa->pools, i ));
   VG_(deleteXA) (ddpa->pools);
   ddpa->free_fn (ddpa);
}

static __inline__
UChar* ddpa_align ( DedupPoolAlloc* ddpa, UChar *c )
{
   return (UChar*)VG_ROUNDUP(c, ddpa->eltAlign);
}
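
/* Worked example (values purely illustrative): with eltAlign == 8, a pointer
   value of 0x1003 is rounded up by VG_ROUNDUP to 0x1008, while 0x1008 is left
   unchanged; so curpool_free always points at an eltAlign-aligned position
   inside the current pool. */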

/* Allocate a new pool or grow the (only) pool for a fixed size ddpa. */
__attribute__((noinline))
static void ddpa_add_new_pool_or_grow ( DedupPoolAlloc* ddpa )
{
   vg_assert(ddpa);

   if (ddpa->fixedSzb > 0 && ddpa->curpool != NULL) {
      // Grow (* 2) the current (fixed elt) pool
      UChar *curpool_align = ddpa_align(ddpa, ddpa->curpool);
      SizeT curpool_used = ddpa->curpool_free - curpool_align;
      SizeT curpool_size = ddpa->curpool_limit - ddpa->curpool + 1;
      UChar *newpool = ddpa->alloc_fn (ddpa->cc, 2 * curpool_size);
      UChar *newpool_free = ddpa_align (ddpa, newpool);
      UChar *newpool_limit = newpool + 2 * curpool_size - 1;
      Word reloc_offset = (Addr)newpool_free - (Addr)curpool_align;
      ht_node *n;

      VG_(memcpy) (newpool_free, curpool_align, curpool_used);
      /* We have reallocated the (only) pool. We need to relocate the pointers
         in the hash table nodes. */
      VG_(HT_ResetIter) (ddpa->ht_elements);
      while ((n = VG_(HT_Next) (ddpa->ht_elements))) {
         n->elt = (void*)((Addr)n->elt + reloc_offset);
      }
      newpool_free += curpool_used;

      VG_(dropHeadXA) (ddpa->pools, 1);
      ddpa->free_fn (ddpa->curpool);
      ddpa->curpool = newpool;
      ddpa->curpool_free = newpool_free;
      ddpa->curpool_limit = newpool_limit;
      VG_(addToXA)( ddpa->pools, &ddpa->curpool);
   } else {
      /* Allocate a new pool, or allocate the first/only pool for a
         fixed size ddpa. */
      ddpa->curpool = ddpa->alloc_fn( ddpa->cc, ddpa->poolSzB);
      ddpa->curpool_limit = ddpa->curpool + ddpa->poolSzB - 1;
      ddpa->curpool_free = ddpa_align (ddpa, ddpa->curpool);
      /* add to our collection of pools */
      VG_(addToXA)( ddpa->pools, &ddpa->curpool );
   }
}
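
/* A purely illustrative example of the relocation above: if the old (aligned)
   pool started at 0x5000 and the grown pool's aligned start is 0x9000, then
   reloc_offset is 0x4000 and every ht_node::elt pointer that pointed at
   0x5000 + k now points at 0x9000 + k.  Element numbers handed out by
   VG_(allocFixedEltDedupPA) are derived from offsets relative to the pool
   start, so they remain valid across the move. */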

/* Compare function for 'gen' hash table. No need to compare the key
   in this function, as the hash table already does it for us, and
   in any case, if the data is equal, the keys must also be equal. */
static Word cmp_pool_elt (const void* node1, const void* node2 )
{
   const ht_node* hnode1 = node1;
   const ht_node* hnode2 = node2;

   /* As this function is called by the hashtable, which has already checked
      for key equality, it is likely that this is the 'good' element.
      So, we handle the equal case first. */
   if (hnode1->eltSzB == hnode2->eltSzB)
      return VG_(memcmp) (hnode1->elt, hnode2->elt, hnode1->eltSzB);
   else if (hnode1->eltSzB < hnode2->eltSzB)
      return -1;
   else
      return 1;
}

/* Print some stats. */
static void print_stats (DedupPoolAlloc *ddpa)
{
   VG_(message)(Vg_DebugMsg,
                "dedupPA:%s %ld allocs (%d uniq)"
                " %ld pools (%ld bytes free in last pool)\n",
                ddpa->cc,
                (long int) ddpa->nr_alloc_calls,
                VG_(HT_count_nodes)(ddpa->ht_elements),
                VG_(sizeXA)(ddpa->pools),
                ddpa->curpool ?
                (long int) (ddpa->curpool_limit - ddpa->curpool_free + 1) : 0);
   VG_(HT_print_stats) (ddpa->ht_elements, cmp_pool_elt);
}

/* Dummy free, as the ht elements are allocated in a pool, and
   we will destroy the pool in one single operation. */
static void htelem_dummyfree(void* ht_elem)
{
}

void VG_(freezeDedupPA) (DedupPoolAlloc *ddpa,
                         void (*shrink_block)(void*, SizeT))
{
   if (VG_(clo_stats)
       && (VG_(clo_verbosity) > 2 || VG_(debugLog_getLevel) () >= 2)) {
      print_stats(ddpa);
   }
   vg_assert (!ddpa->fixedSzb || VG_(sizeXA) (ddpa->pools) == 1);
   if (shrink_block && ddpa->curpool_limit > ddpa->curpool_free)
      (*shrink_block)(ddpa->curpool, ddpa->curpool_free - ddpa->curpool);
   VG_(HT_destruct) ( ddpa->ht_elements, htelem_dummyfree);
   ddpa->ht_elements = NULL;
   VG_(deletePA) (ddpa->ht_node_pa);
   ddpa->ht_node_pa = NULL;
}
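
/* Illustrative only (the my_shrink helper below is hypothetical, not part of
   Valgrind): a client that wants the last, partially used pool trimmed back
   to its used size can pass a shrink callback, e.g.

      static void my_shrink (void* block, SizeT new_size)
      {
         // Give the unused tail of 'block' back to the underlying
         // allocator; how this is done depends on the client.
      }
      ...
      VG_(freezeDedupPA)(dp, my_shrink);

   After freezing, ht_elements is NULL and no further allocation from the
   DedupPoolAlloc is allowed, but previously returned element pointers stay
   valid until VG_(deleteDedupPA). */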

// hash function used by gawk and SDBM.
static UInt sdbm_hash (const UChar* buf, UInt len )
{
  UInt h;
  UInt i;

  h = 0;
  for (i = 0; i < len; i++)
    h = *buf++ + (h<<6) + (h<<16) - h;
  return h;
}
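
/* The update step above is the usual sdbm recurrence written with shifts:
   (h<<6) + (h<<16) - h == h * 65599, so each iteration computes
   h = h * 65599 + *buf.  For instance, hashing the two bytes {1, 2} gives
   h = (0 * 65599 + 1) * 65599 + 2 = 65601. */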

const void* VG_(allocEltDedupPA) (DedupPoolAlloc *ddpa, SizeT eltSzB,
                                  const void *elt)
{
   ht_node ht_elt;
   void* elt_ins;
   ht_node *ht_ins;
   vg_assert(ddpa);
   vg_assert(ddpa->ht_elements);
   vg_assert (eltSzB <= ddpa->poolSzB);

   ddpa->nr_alloc_calls++;

   ht_elt.key = sdbm_hash (elt, eltSzB);

   ht_elt.eltSzB = eltSzB;
   ht_elt.elt = elt;

   ht_ins = VG_(HT_gen_lookup) (ddpa->ht_elements, &ht_elt, cmp_pool_elt);
   if (ht_ins)
      return ht_ins->elt;

   /* Not found -> we need to allocate a new element from the pool
      and insert it in the hash table of inserted elements. */

   // Add a new pool or grow pool if not enough space in the current pool
   if (UNLIKELY(ddpa->curpool_free == NULL
       || ddpa->curpool_free + eltSzB - 1 > ddpa->curpool_limit)) {
      ddpa_add_new_pool_or_grow (ddpa);
   }

   elt_ins = ddpa->curpool_free;
   VG_(memcpy)(elt_ins, elt, eltSzB);
   ddpa->curpool_free = ddpa_align(ddpa, ddpa->curpool_free + eltSzB);

   ht_ins = VG_(allocEltPA) (ddpa->ht_node_pa);
   ht_ins->key = ht_elt.key;
   ht_ins->eltSzB = eltSzB;
   ht_ins->elt = elt_ins;
   VG_(HT_add_node)(ddpa->ht_elements, ht_ins);
   return elt_ins;
}
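
/* Behaviour sketch (illustrative; 'dp' is the made-up pool from the creation
   sketch earlier in this file): allocating the same bytes twice returns the
   same stored copy, so only one copy lives in the pools:

      const void* a = VG_(allocEltDedupPA)(dp, 6, "hello");
      const void* b = VG_(allocEltDedupPA)(dp, 6, "hello");
      const void* c = VG_(allocEltDedupPA)(dp, 6, "world");
      // a == b, a != c; nr_alloc_calls is 3, but only 2 elements are
      // stored in the pool and in ht_elements.
*/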

static __inline__
UInt elt2nr (DedupPoolAlloc *ddpa, const void *dedup_elt)
{
   vg_assert (dedup_elt >= (const void *)ddpa->curpool
              && dedup_elt < (const void *)ddpa->curpool_free);
   return 1 + ((const UChar*)dedup_elt - (const UChar *)ddpa->curpool)
      / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}

UInt VG_(allocFixedEltDedupPA) (DedupPoolAlloc *ddpa,
                                SizeT eltSzB, const void *elt)
{
   if (ddpa->fixedSzb == 0) {
      // First insertion in this ddpa
      vg_assert (ddpa->nr_alloc_calls == 0);
      vg_assert (eltSzB > 0);
      ddpa->fixedSzb = eltSzB;
   }
   vg_assert (ddpa->fixedSzb == eltSzB);
   const void *dedup_elt = VG_(allocEltDedupPA) (ddpa, eltSzB, elt);
   return elt2nr (ddpa, dedup_elt);
}

void* VG_(indexEltNumber) (DedupPoolAlloc *ddpa,
                           UInt eltNr)
{
   void *dedup_elt;

   dedup_elt = ddpa->curpool
      + (eltNr - 1) * VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);

   vg_assert ((UChar*)dedup_elt >= ddpa->curpool
              && (UChar*)dedup_elt < ddpa->curpool_free);

   return dedup_elt;
}
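
/* Worked example of the element-number <-> address mapping (illustrative
   values): with fixedSzb == 6 and eltAlign == 4, each slot occupies
   VG_ROUNDUP(6, 4) == 8 bytes, so element number 3 lives at
   curpool + (3 - 1) * 8 == curpool + 16, and elt2nr maps that address back
   to 1 + 16 / 8 == 3. */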

UInt VG_(sizeDedupPA) (DedupPoolAlloc *ddpa)
{
   if (ddpa->curpool == NULL)
      return 0;

   vg_assert (ddpa->fixedSzb);
   return (ddpa->curpool_free - ddpa_align(ddpa, ddpa->curpool))
      / VG_ROUNDUP(ddpa->fixedSzb, ddpa->eltAlign);
}