1 /* Copyright (c) 2008-2021, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
7 * \brief Implementation for memarea_t, an allocator for allocating lots of
8 * small objects that will be freed all at once.
12 #include "lib/memarea/memarea.h"
17 #include "lib/arch/bytes.h"
18 #include "lib/cc/torint.h"
19 #include "lib/smartlist_core/smartlist_core.h"
20 #include "lib/smartlist_core/smartlist_foreach.h"
21 #include "lib/log/log.h"
22 #include "lib/log/util_bug.h"
23 #include "lib/malloc/malloc.h"
25 #ifndef DISABLE_MEMORY_SENTINELS
27 /** If true, we try to detect any attempts to write beyond the length of a
31 /** All returned pointers should be aligned to the nearest multiple of this
33 #define MEMAREA_ALIGN SIZEOF_VOID_P
35 /** A value which, when masked out of a pointer, produces a maximally aligned
37 #if MEMAREA_ALIGN == 4
38 #define MEMAREA_ALIGN_MASK ((uintptr_t)3)
39 #elif MEMAREA_ALIGN == 8
40 #define MEMAREA_ALIGN_MASK ((uintptr_t)7)
42 #error "void* is neither 4 nor 8 bytes long."
43 #endif /* MEMAREA_ALIGN == 4 || ... */
45 #if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
46 #define USE_ALIGNED_ATTRIBUTE
47 /** Name for the 'memory' member of a memory chunk. */
51 #endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */
54 /** Magic value that we stick at the end of a memarea so we can make sure
55 * there are no run-off-the-end bugs. */
56 #define SENTINEL_VAL 0x90806622u
57 /** How many bytes per area do we devote to the sentinel? */
58 #define SENTINEL_LEN sizeof(uint32_t)
59 /** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
60 * end, set those bytes. */
61 #define SET_SENTINEL(chunk) \
63 set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
65 /** Assert that the sentinel on a memarea is set correctly. */
66 #define CHECK_SENTINEL(chunk) \
68 uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]); \
69 tor_assert(sent_val == SENTINEL_VAL); \
71 #else /* !defined(USE_SENTINELS) */
72 #define SENTINEL_LEN 0
73 #define SET_SENTINEL(chunk) STMT_NIL
74 #define CHECK_SENTINEL(chunk) STMT_NIL
75 #endif /* defined(USE_SENTINELS) */
77 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
79 realign_pointer(void *ptr
)
81 uintptr_t x
= (uintptr_t)ptr
;
82 x
= (x
+MEMAREA_ALIGN_MASK
) & ~MEMAREA_ALIGN_MASK
;
83 /* Reinstate this if bug 930 ever reappears
84 tor_assert(((void*)x) >= ptr);
/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at.  If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;
/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096
117 /** A memarea_t is an allocation region for a set of small memory requests
118 * that will all be freed at once. */
120 memarea_chunk_t
*first
; /**< Top of the chunk stack: never NULL. */
123 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
124 static memarea_chunk_t
*
125 alloc_chunk(size_t sz
)
127 tor_assert(sz
< SIZE_T_CEILING
);
129 size_t chunk_size
= sz
< CHUNK_SIZE
? CHUNK_SIZE
: sz
;
130 memarea_chunk_t
*res
;
131 chunk_size
+= SENTINEL_LEN
;
132 res
= tor_malloc(chunk_size
);
133 res
->next_chunk
= NULL
;
134 res
->mem_size
= chunk_size
- CHUNK_HEADER_SIZE
- SENTINEL_LEN
;
135 res
->next_mem
= res
->U_MEM
;
136 tor_assert(res
->next_mem
+res
->mem_size
+SENTINEL_LEN
==
137 ((char*)res
)+chunk_size
);
138 tor_assert(realign_pointer(res
->next_mem
) == res
->next_mem
);
143 /** Release <b>chunk</b> from a memarea. */
145 memarea_chunk_free_unchecked(memarea_chunk_t
*chunk
)
147 CHECK_SENTINEL(chunk
);
151 /** Allocate and return new memarea. */
155 memarea_t
*head
= tor_malloc(sizeof(memarea_t
));
156 head
->first
= alloc_chunk(CHUNK_SIZE
);
160 /** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
161 * and friends for this area */
163 memarea_drop_all_(memarea_t
*area
)
165 memarea_chunk_t
*chunk
, *next
;
166 for (chunk
= area
->first
; chunk
; chunk
= next
) {
167 next
= chunk
->next_chunk
;
168 memarea_chunk_free_unchecked(chunk
);
170 area
->first
= NULL
; /*fail fast on */
174 /** Forget about having allocated anything in <b>area</b>, and free some of
175 * the backing storage associated with it, as appropriate. Invalidates all
176 * pointers returned from memarea_alloc() for this area. */
178 memarea_clear(memarea_t
*area
)
180 memarea_chunk_t
*chunk
, *next
;
181 if (area
->first
->next_chunk
) {
182 for (chunk
= area
->first
->next_chunk
; chunk
; chunk
= next
) {
183 next
= chunk
->next_chunk
;
184 memarea_chunk_free_unchecked(chunk
);
186 area
->first
->next_chunk
= NULL
;
188 area
->first
->next_mem
= area
->first
->U_MEM
;
191 /** Return true iff <b>p</b> is in a range that has been returned by an
192 * allocation from <b>area</b>. */
194 memarea_owns_ptr(const memarea_t
*area
, const void *p
)
196 memarea_chunk_t
*chunk
;
198 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
199 if (ptr
>= chunk
->U_MEM
&& ptr
< chunk
->next_mem
)
205 /** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
206 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
207 * size, though we can deal if it isn't. */
209 memarea_alloc(memarea_t
*area
, size_t sz
)
211 memarea_chunk_t
*chunk
= area
->first
;
214 CHECK_SENTINEL(chunk
);
215 tor_assert(sz
< SIZE_T_CEILING
);
218 tor_assert(chunk
->next_mem
<= chunk
->U_MEM
+ chunk
->mem_size
);
219 const size_t space_remaining
=
220 (chunk
->U_MEM
+ chunk
->mem_size
) - chunk
->next_mem
;
221 if (sz
> space_remaining
) {
222 if (sz
+CHUNK_HEADER_SIZE
>= CHUNK_SIZE
) {
223 /* This allocation is too big. Stick it in a special chunk, and put
224 * that chunk second in the list. */
225 memarea_chunk_t
*new_chunk
= alloc_chunk(sz
+CHUNK_HEADER_SIZE
);
226 new_chunk
->next_chunk
= chunk
->next_chunk
;
227 chunk
->next_chunk
= new_chunk
;
230 memarea_chunk_t
*new_chunk
= alloc_chunk(CHUNK_SIZE
);
231 new_chunk
->next_chunk
= chunk
;
232 area
->first
= chunk
= new_chunk
;
234 tor_assert(chunk
->mem_size
>= sz
);
236 result
= chunk
->next_mem
;
237 chunk
->next_mem
= chunk
->next_mem
+ sz
;
238 /* Reinstate these if bug 930 ever comes back
239 tor_assert(chunk->next_mem >= chunk->U_MEM);
240 tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
242 chunk
->next_mem
= realign_pointer(chunk
->next_mem
);
246 /** As memarea_alloc(), but clears the memory it returns. */
248 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
250 void *result
= memarea_alloc(area
, sz
);
251 memset(result
, 0, sz
);
255 /** As memdup, but returns the memory from <b>area</b>. */
257 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
259 char *result
= memarea_alloc(area
, n
);
260 memcpy(result
, s
, n
);
264 /** As strdup, but returns the memory from <b>area</b>. */
266 memarea_strdup(memarea_t
*area
, const char *s
)
268 return memarea_memdup(area
, s
, strlen(s
)+1);
271 /** As strndup, but returns the memory from <b>area</b>. */
273 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
277 tor_assert(n
< SIZE_T_CEILING
);
278 for (ln
= 0; ln
< n
&& s
[ln
]; ++ln
)
280 result
= memarea_alloc(area
, ln
+1);
281 memcpy(result
, s
, ln
);
286 /** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
287 * and <b>used_out</b> to the number of bytes currently used. */
289 memarea_get_stats(memarea_t
*area
, size_t *allocated_out
, size_t *used_out
)
292 memarea_chunk_t
*chunk
;
293 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
294 CHECK_SENTINEL(chunk
);
295 a
+= CHUNK_HEADER_SIZE
+ chunk
->mem_size
;
296 tor_assert(chunk
->next_mem
>= chunk
->U_MEM
);
297 u
+= CHUNK_HEADER_SIZE
+ (chunk
->next_mem
- chunk
->U_MEM
);
303 /** Assert that <b>area</b> is okay. */
305 memarea_assert_ok(memarea_t
*area
)
307 memarea_chunk_t
*chunk
;
308 tor_assert(area
->first
);
310 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
311 CHECK_SENTINEL(chunk
);
312 tor_assert(chunk
->next_mem
>= chunk
->U_MEM
);
313 tor_assert(chunk
->next_mem
<=
314 (char*) realign_pointer(chunk
->U_MEM
+chunk
->mem_size
));
318 #else /* defined(DISABLE_MEMORY_SENTINELS) */
327 memarea_t
*ma
= tor_malloc_zero(sizeof(memarea_t
));
328 ma
->pieces
= smartlist_new();
332 memarea_drop_all_(memarea_t
*area
)
335 smartlist_free(area
->pieces
);
339 memarea_clear(memarea_t
*area
)
341 SMARTLIST_FOREACH(area
->pieces
, void *, p
, tor_free_(p
));
342 smartlist_clear(area
->pieces
);
345 memarea_owns_ptr(const memarea_t
*area
, const void *ptr
)
347 SMARTLIST_FOREACH(area
->pieces
, const void *, p
, if (ptr
== p
) return 1;);
352 memarea_alloc(memarea_t
*area
, size_t sz
)
354 void *result
= tor_malloc(sz
);
355 smartlist_add(area
->pieces
, result
);
360 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
362 void *result
= tor_malloc_zero(sz
);
363 smartlist_add(area
->pieces
, result
);
367 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
369 void *r
= memarea_alloc(area
, n
);
374 memarea_strdup(memarea_t
*area
, const char *s
)
376 size_t n
= strlen(s
);
377 char *r
= memarea_alloc(area
, n
+1);
383 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
385 size_t ln
= strnlen(s
, n
);
386 char *r
= memarea_alloc(area
, ln
+1);
392 memarea_get_stats(memarea_t
*area
,
393 size_t *allocated_out
, size_t *used_out
)
396 *allocated_out
= *used_out
= 128;
399 memarea_assert_ok(memarea_t
*area
)
404 #endif /* !defined(DISABLE_MEMORY_SENTINELS) */