1 /* Copyright (c) 2008-2019, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
/**
 * \file memarea.c
 *
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
12 #include "lib/memarea/memarea.h"
17 #include "lib/arch/bytes.h"
18 #include "lib/cc/torint.h"
19 #include "lib/container/smartlist.h"
20 #include "lib/log/log.h"
21 #include "lib/log/util_bug.h"
22 #include "lib/malloc/malloc.h"
24 #ifndef DISABLE_MEMORY_SENTINELS
26 /** If true, we try to detect any attempts to write beyond the length of a
30 /** All returned pointers should be aligned to the nearest multiple of this
32 #define MEMAREA_ALIGN SIZEOF_VOID_P
34 /** A value which, when masked out of a pointer, produces a maximally aligned
36 #if MEMAREA_ALIGN == 4
37 #define MEMAREA_ALIGN_MASK ((uintptr_t)3)
38 #elif MEMAREA_ALIGN == 8
39 #define MEMAREA_ALIGN_MASK ((uintptr_t)7)
41 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
42 #endif /* MEMAREA_ALIGN == 4 || ... */
44 #if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
45 #define USE_ALIGNED_ATTRIBUTE
46 /** Name for the 'memory' member of a memory chunk. */
50 #endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */
53 /** Magic value that we stick at the end of a memarea so we can make sure
54 * there are no run-off-the-end bugs. */
55 #define SENTINEL_VAL 0x90806622u
56 /** How many bytes per area do we devote to the sentinel? */
57 #define SENTINEL_LEN sizeof(uint32_t)
58 /** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
59 * end, set those bytes. */
60 #define SET_SENTINEL(chunk) \
62 set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
64 /** Assert that the sentinel on a memarea is set correctly. */
65 #define CHECK_SENTINEL(chunk) \
67 uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]); \
68 tor_assert(sent_val == SENTINEL_VAL); \
70 #else /* !(defined(USE_SENTINELS)) */
71 #define SENTINEL_LEN 0
72 #define SET_SENTINEL(chunk) STMT_NIL
73 #define CHECK_SENTINEL(chunk) STMT_NIL
74 #endif /* defined(USE_SENTINELS) */
76 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
78 realign_pointer(void *ptr
)
80 uintptr_t x
= (uintptr_t)ptr
;
81 x
= (x
+MEMAREA_ALIGN_MASK
) & ~MEMAREA_ALIGN_MASK
;
82 /* Reinstate this if bug 930 ever reappears
83 tor_assert(((void*)x) >= ptr);
/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at.  If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;
/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
111 #define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)
113 /** What's the smallest that we'll allocate a chunk? */
114 #define CHUNK_SIZE 4096
116 /** A memarea_t is an allocation region for a set of small memory requests
117 * that will all be freed at once. */
119 memarea_chunk_t
*first
; /**< Top of the chunk stack: never NULL. */
122 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
123 static memarea_chunk_t
*
124 alloc_chunk(size_t sz
)
126 tor_assert(sz
< SIZE_T_CEILING
);
128 size_t chunk_size
= sz
< CHUNK_SIZE
? CHUNK_SIZE
: sz
;
129 memarea_chunk_t
*res
;
130 chunk_size
+= SENTINEL_LEN
;
131 res
= tor_malloc(chunk_size
);
132 res
->next_chunk
= NULL
;
133 res
->mem_size
= chunk_size
- CHUNK_HEADER_SIZE
- SENTINEL_LEN
;
134 res
->next_mem
= res
->U_MEM
;
135 tor_assert(res
->next_mem
+res
->mem_size
+SENTINEL_LEN
==
136 ((char*)res
)+chunk_size
);
137 tor_assert(realign_pointer(res
->next_mem
) == res
->next_mem
);
142 /** Release <b>chunk</b> from a memarea. */
144 memarea_chunk_free_unchecked(memarea_chunk_t
*chunk
)
146 CHECK_SENTINEL(chunk
);
150 /** Allocate and return new memarea. */
154 memarea_t
*head
= tor_malloc(sizeof(memarea_t
));
155 head
->first
= alloc_chunk(CHUNK_SIZE
);
159 /** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
160 * and friends for this area */
162 memarea_drop_all_(memarea_t
*area
)
164 memarea_chunk_t
*chunk
, *next
;
165 for (chunk
= area
->first
; chunk
; chunk
= next
) {
166 next
= chunk
->next_chunk
;
167 memarea_chunk_free_unchecked(chunk
);
169 area
->first
= NULL
; /*fail fast on */
173 /** Forget about having allocated anything in <b>area</b>, and free some of
174 * the backing storage associated with it, as appropriate. Invalidates all
175 * pointers returned from memarea_alloc() for this area. */
177 memarea_clear(memarea_t
*area
)
179 memarea_chunk_t
*chunk
, *next
;
180 if (area
->first
->next_chunk
) {
181 for (chunk
= area
->first
->next_chunk
; chunk
; chunk
= next
) {
182 next
= chunk
->next_chunk
;
183 memarea_chunk_free_unchecked(chunk
);
185 area
->first
->next_chunk
= NULL
;
187 area
->first
->next_mem
= area
->first
->U_MEM
;
190 /** Return true iff <b>p</b> is in a range that has been returned by an
191 * allocation from <b>area</b>. */
193 memarea_owns_ptr(const memarea_t
*area
, const void *p
)
195 memarea_chunk_t
*chunk
;
197 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
198 if (ptr
>= chunk
->U_MEM
&& ptr
< chunk
->next_mem
)
204 /** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
205 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
206 * size, though we can deal if it isn't. */
208 memarea_alloc(memarea_t
*area
, size_t sz
)
210 memarea_chunk_t
*chunk
= area
->first
;
213 CHECK_SENTINEL(chunk
);
214 tor_assert(sz
< SIZE_T_CEILING
);
217 tor_assert(chunk
->next_mem
<= chunk
->U_MEM
+ chunk
->mem_size
);
218 const size_t space_remaining
=
219 (chunk
->U_MEM
+ chunk
->mem_size
) - chunk
->next_mem
;
220 if (sz
> space_remaining
) {
221 if (sz
+CHUNK_HEADER_SIZE
>= CHUNK_SIZE
) {
222 /* This allocation is too big. Stick it in a special chunk, and put
223 * that chunk second in the list. */
224 memarea_chunk_t
*new_chunk
= alloc_chunk(sz
+CHUNK_HEADER_SIZE
);
225 new_chunk
->next_chunk
= chunk
->next_chunk
;
226 chunk
->next_chunk
= new_chunk
;
229 memarea_chunk_t
*new_chunk
= alloc_chunk(CHUNK_SIZE
);
230 new_chunk
->next_chunk
= chunk
;
231 area
->first
= chunk
= new_chunk
;
233 tor_assert(chunk
->mem_size
>= sz
);
235 result
= chunk
->next_mem
;
236 chunk
->next_mem
= chunk
->next_mem
+ sz
;
237 /* Reinstate these if bug 930 ever comes back
238 tor_assert(chunk->next_mem >= chunk->U_MEM);
239 tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
241 chunk
->next_mem
= realign_pointer(chunk
->next_mem
);
245 /** As memarea_alloc(), but clears the memory it returns. */
247 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
249 void *result
= memarea_alloc(area
, sz
);
250 memset(result
, 0, sz
);
254 /** As memdup, but returns the memory from <b>area</b>. */
256 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
258 char *result
= memarea_alloc(area
, n
);
259 memcpy(result
, s
, n
);
263 /** As strdup, but returns the memory from <b>area</b>. */
265 memarea_strdup(memarea_t
*area
, const char *s
)
267 return memarea_memdup(area
, s
, strlen(s
)+1);
270 /** As strndup, but returns the memory from <b>area</b>. */
272 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
276 tor_assert(n
< SIZE_T_CEILING
);
277 for (ln
= 0; ln
< n
&& s
[ln
]; ++ln
)
279 result
= memarea_alloc(area
, ln
+1);
280 memcpy(result
, s
, ln
);
285 /** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
286 * and <b>used_out</b> to the number of bytes currently used. */
288 memarea_get_stats(memarea_t
*area
, size_t *allocated_out
, size_t *used_out
)
291 memarea_chunk_t
*chunk
;
292 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
293 CHECK_SENTINEL(chunk
);
294 a
+= CHUNK_HEADER_SIZE
+ chunk
->mem_size
;
295 tor_assert(chunk
->next_mem
>= chunk
->U_MEM
);
296 u
+= CHUNK_HEADER_SIZE
+ (chunk
->next_mem
- chunk
->U_MEM
);
302 /** Assert that <b>area</b> is okay. */
304 memarea_assert_ok(memarea_t
*area
)
306 memarea_chunk_t
*chunk
;
307 tor_assert(area
->first
);
309 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
310 CHECK_SENTINEL(chunk
);
311 tor_assert(chunk
->next_mem
>= chunk
->U_MEM
);
312 tor_assert(chunk
->next_mem
<=
313 (char*) realign_pointer(chunk
->U_MEM
+chunk
->mem_size
));
317 #else /* !(!defined(DISABLE_MEMORY_SENTINELS)) */
326 memarea_t
*ma
= tor_malloc_zero(sizeof(memarea_t
));
327 ma
->pieces
= smartlist_new();
331 memarea_drop_all_(memarea_t
*area
)
334 smartlist_free(area
->pieces
);
338 memarea_clear(memarea_t
*area
)
340 SMARTLIST_FOREACH(area
->pieces
, void *, p
, tor_free_(p
));
341 smartlist_clear(area
->pieces
);
344 memarea_owns_ptr(const memarea_t
*area
, const void *ptr
)
346 SMARTLIST_FOREACH(area
->pieces
, const void *, p
, if (ptr
== p
) return 1;);
351 memarea_alloc(memarea_t
*area
, size_t sz
)
353 void *result
= tor_malloc(sz
);
354 smartlist_add(area
->pieces
, result
);
359 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
361 void *result
= tor_malloc_zero(sz
);
362 smartlist_add(area
->pieces
, result
);
366 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
368 void *r
= memarea_alloc(area
, n
);
373 memarea_strdup(memarea_t
*area
, const char *s
)
375 size_t n
= strlen(s
);
376 char *r
= memarea_alloc(area
, n
+1);
382 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
384 size_t ln
= strnlen(s
, n
);
385 char *r
= memarea_alloc(area
, ln
+1);
391 memarea_get_stats(memarea_t
*area
,
392 size_t *allocated_out
, size_t *used_out
)
395 *allocated_out
= *used_out
= 128;
398 memarea_assert_ok(memarea_t
*area
)
403 #endif /* !defined(DISABLE_MEMORY_SENTINELS) */