/* Copyright (c) 2008-2019, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file memarea.c
 *
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
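/* Typical usage (illustrative sketch; these functions are all defined
 * below):
 *
 *     memarea_t *area = memarea_new();
 *     char *buf = memarea_alloc(area, 64);
 *     char *copy = memarea_strdup(area, "hello");
 *     ... use buf and copy ...
 *     memarea_drop_all_(area);  // frees buf, copy, and the area itself
 */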
#include "orconfig.h"
#include "lib/memarea/memarea.h"

#include <stdlib.h>
#include <string.h>

#include "lib/arch/bytes.h"
#include "lib/cc/torint.h"
#include "lib/container/smartlist.h"
#include "lib/log/log.h"
#include "lib/log/util_bug.h"
#include "lib/malloc/malloc.h"
#ifndef DISABLE_MEMORY_SENTINELS

/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

/** A value which, when masked out of a pointer, produces a maximally aligned
 * pointer. */
#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK ((uintptr_t)3)
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK ((uintptr_t)7)
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif /* MEMAREA_ALIGN == 4 || ... */

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
/** Name for the 'memory' member of a memory chunk. */
#define U_MEM mem
#else
#define U_MEM u.mem
#endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */
#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else /* !(defined(USE_SENTINELS)) */
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif /* defined(USE_SENTINELS) */
/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static inline void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}
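/* For example, with MEMAREA_ALIGN == 8 (so MEMAREA_ALIGN_MASK == 7), an
 * input of 0x1005 becomes (0x1005 + 7) & ~7 == 0x1008, while an
 * already-aligned 0x1008 is returned unchanged. */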
/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at. If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;
/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096
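/* Resulting chunk layout (see alloc_chunk() below); the sentinel word is
 * present only when USE_SENTINELS is defined:
 *
 *   +-------------------+------------------------+--------------+
 *   | CHUNK_HEADER_SIZE | mem (mem_size bytes)   | SENTINEL_LEN |
 *   +-------------------+------------------------+--------------+
 */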
/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};
/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz)
{
  tor_assert(sz < SIZE_T_CEILING);

  size_t chunk_size = sz < CHUNK_SIZE ? CHUNK_SIZE : sz;
  memarea_chunk_t *res;
  chunk_size += SENTINEL_LEN;
  res = tor_malloc(chunk_size);
  res->next_chunk = NULL;
  res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
  res->next_mem = res->U_MEM;
  tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
             ((char*)res)+chunk_size);
  tor_assert(realign_pointer(res->next_mem) == res->next_mem);
  SET_SENTINEL(res);
  return res;
}
/** Release <b>chunk</b> from a memarea. */
static void
memarea_chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  tor_free(chunk);
}
/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE);
  return head;
}
/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all_(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    memarea_chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* Fail fast on any use-after-free. */
  tor_free(area);
}
/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      memarea_chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}
/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}
/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
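  /* After this realignment, next_mem may point up to MEMAREA_ALIGN-1 bytes
   * past mem+mem_size; memarea_assert_ok() allows for this by checking
   * against the same realign_pointer() bound. */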
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}
/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}
/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}
/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  /* Find the length of s, capped at n bytes. */
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln] = '\0';
  return result;
}
/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}
/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);

  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
          (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}
#else /* defined(DISABLE_MEMORY_SENTINELS) */
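/* With sentinels disabled (e.g., when running under an external memory
 * checker), each allocation is an individual heap block tracked in a
 * smartlist, so out-of-bounds accesses can be caught by the checker
 * itself. */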
struct memarea_t {
  smartlist_t *pieces;
};
memarea_t *
memarea_new(void)
{
  memarea_t *ma = tor_malloc_zero(sizeof(memarea_t));
  ma->pieces = smartlist_new();
  return ma;
}
void
memarea_drop_all_(memarea_t *area)
{
  memarea_clear(area);
  smartlist_free(area->pieces);
  tor_free(area);
}
void
memarea_clear(memarea_t *area)
{
  SMARTLIST_FOREACH(area->pieces, void *, p, tor_free_(p));
  smartlist_clear(area->pieces);
}
int
memarea_owns_ptr(const memarea_t *area, const void *ptr)
{
  SMARTLIST_FOREACH(area->pieces, const void *, p, if (ptr == p) return 1;);
  return 0;
}
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  void *result = tor_malloc(sz);
  smartlist_add(area->pieces, result);
  return result;
}
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = tor_malloc_zero(sz);
  smartlist_add(area->pieces, result);
  return result;
}
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  void *r = memarea_alloc(area, n);
  memcpy(r, s, n);
  return r;
}
char *
memarea_strdup(memarea_t *area, const char *s)
{
  size_t n = strlen(s);
  char *r = memarea_alloc(area, n+1);
  memcpy(r, s, n);
  r[n] = 0;
  return r;
}
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = strnlen(s, n);
  char *r = memarea_alloc(area, ln+1);
  memcpy(r, s, ln);
  r[ln] = 0;
  return r;
}
void
memarea_get_stats(memarea_t *area,
                  size_t *allocated_out, size_t *used_out)
{
  (void)area;
  /* Stats are not tracked in this mode; report a fixed placeholder. */
  *allocated_out = *used_out = 128;
}
void
memarea_assert_ok(memarea_t *area)
{
  (void)area;
}

#endif /* !defined(DISABLE_MEMORY_SENTINELS) */