#define POOL_DEF_EXTENT	(32 * 1024)

#define POOL_QALIGN_P2	(1<<16)		/* power-of-2 qalign */

struct alloc_pool
{
	size_t			size;		/* extent size */
	size_t			quantum;	/* allocation quantum */
	struct pool_extent	*extents;	/* top extent is "live" */
	void			(*bomb)();	/* called if malloc fails */
	int			flags;

	/* statistical data */
	unsigned long		e_created;	/* extents created */
	unsigned long		e_freed;	/* extents destroyed */
	int64			n_allocated;	/* calls to alloc */
	int64			n_freed;	/* calls to free */
	int64			b_allocated;	/* cum. bytes allocated */
	int64			b_freed;	/* cum. bytes freed */
};

struct pool_extent
{
	struct pool_extent	*next;
	void			*start;		/* starting address */
	size_t			free;		/* free bytecount */
	size_t			bound;		/* trapped free bytes */
};

#define MINALIGN	offsetof(struct align_test, bar)

/* Temporarily cast a void* var into a char* var when adding an offset (to
 * keep some compilers from complaining about the pointer arithmetic). */
#define PTR_ADD(b,o)	( (void*) ((char*)(b) + (o)) )
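
/* Illustrative example (hypothetical "ext" variable): the char* cast makes
 * the offset count in bytes, so PTR_ADD(ext->start, ext->free) yields the
 * address ext->free bytes into an extent, i.e. the boundary between its
 * remaining free space and its allocated data, without a compiler warning
 * about arithmetic on a void pointer. */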

pool_create(size_t size, size_t quantum, void (*bomb)(const char*, const char*, int), int flags)
{
	struct alloc_pool *pool;

	if ((MINALIGN & (MINALIGN - 1)) != 0) {
		(*bomb)("Compiler error: MINALIGN is not a power of 2", __FILE__, __LINE__);
		return NULL;
	}

	if (!(pool = new0(struct alloc_pool)))
		return NULL;

	if (!size)
		size = POOL_DEF_EXTENT;

	if (flags & POOL_INTERN) {
		if (size <= sizeof (struct pool_extent))
			size = quantum;
		else
			size -= sizeof (struct pool_extent);
		flags |= POOL_PREPEND;
	}

	if (quantum <= 1)
		flags = (flags | POOL_NO_QALIGN) & ~POOL_QALIGN_P2;
	else if (!(flags & POOL_NO_QALIGN)) {
		if (size % quantum)
			size += quantum - size % quantum;
		/* If quantum is a power of 2, we'll avoid using modulus. */
		if (!(quantum & (quantum - 1)))
			flags |= POOL_QALIGN_P2;
	}
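
	/* Worked example (illustrative): with quantum 8, the POOL_QALIGN_P2
	 * path in pool_alloc()/pool_free() rounds a 13-byte request up via
	 * "len += quantum - (len & (quantum - 1))": 13 & 7 == 5, so len
	 * becomes 13 + (8 - 5) == 16, the same result as the modulus form
	 * but with a mask instead of a division. */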

	pool->quantum = quantum;

pool_destroy(alloc_pool_t p)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur, *next;

	for (cur = pool->extents; cur; cur = next) {
		next = cur->next;
		if (pool->flags & POOL_PREPEND)
			free(PTR_ADD(cur->start, -sizeof (struct pool_extent)));

pool_alloc(alloc_pool_t p, size_t len, const char *bomb_msg)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;

	if (!len)
		len = pool->quantum;
	else if (pool->flags & POOL_QALIGN_P2) {
		if (len & (pool->quantum - 1))
			len += pool->quantum - (len & (pool->quantum - 1));
	} else if (!(pool->flags & POOL_NO_QALIGN)) {
		if (len % pool->quantum)
			len += pool->quantum - len % pool->quantum;
	}

	if (len > pool->size)
		goto bomb_out;

	if (!pool->extents || len > pool->extents->free) {
		struct pool_extent *ext;

		if (pool->flags & POOL_PREPEND)
			asize += sizeof (struct pool_extent);

		if (!(start = new_array(char, asize)))
			goto bomb_out;

		if (pool->flags & POOL_CLEAR)
			memset(start, 0, asize);

		if (pool->flags & POOL_PREPEND) {

			start = PTR_ADD(start, sizeof (struct pool_extent));
		} else if (!(ext = new(struct pool_extent)))
			goto bomb_out;

		ext->free = pool->size;
		ext->next = pool->extents;

	}

	pool->b_allocated += len;

	pool->extents->free -= len;

	return PTR_ADD(pool->extents->start, pool->extents->free);

  bomb_out:
	(*pool->bomb)(bomb_msg, __FILE__, __LINE__);

/* This function allows you to declare memory in the pool that you are done
 * using.  If you free all the memory in a pool's extent, that extent will
 * be freed. */
pool_free(alloc_pool_t p, size_t len, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev;

	if (!addr) {
		/* A NULL addr starts a fresh extent for new allocations. */
		if ((cur = pool->extents) != NULL && cur->free != pool->size) {
			cur->bound += cur->free;
			cur->free = 0;
		}
		return;
	}

	if (!len)
		len = pool->quantum;
	else if (pool->flags & POOL_QALIGN_P2) {
		if (len & (pool->quantum - 1))
			len += pool->quantum - (len & (pool->quantum - 1));
	} else if (!(pool->flags & POOL_NO_QALIGN)) {
		if (len % pool->quantum)
			len += pool->quantum - len % pool->quantum;
	}

	pool->b_freed += len;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}

	if (!prev) {
		/* The "live" extent is kept ready for more allocations. */
		if (cur->free + cur->bound + len >= pool->size) {
			if (pool->flags & POOL_CLEAR) {
				memset(PTR_ADD(cur->start, cur->free), 0,
				       pool->size - cur->free);
			}
			cur->free = pool->size;
		} else if (addr == PTR_ADD(cur->start, cur->free)) {
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, len);
			cur->free += len;
		} else
			cur->bound += len;
	} else {
		cur->bound += len;

		if (cur->free + cur->bound >= pool->size) {
			prev->next = cur->next;
			if (pool->flags & POOL_PREPEND)
				free(PTR_ADD(cur->start, -sizeof (struct pool_extent)));

		} else if (prev != pool->extents) {
			/* Move the extent to be the first non-live extent. */
			prev->next = cur->next;
			cur->next = pool->extents->next;
			pool->extents->next = cur;
		}
	}
}
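
/* Usage sketch (illustrative; my_bomb and struct mystuff are hypothetical
 * names, not part of this file):
 *
 *	static void my_bomb(const char *msg, const char *file, int line)
 *	{
 *		fprintf(stderr, "%s at %s(%d)\n", msg, file, line);
 *		exit(1);
 *	}
 *
 *	alloc_pool_t pool = pool_create(0, 16, my_bomb, POOL_CLEAR);
 *	struct mystuff *item = pool_alloc(pool, sizeof (struct mystuff), "mystuff");
 *	...
 *	pool_free(pool, sizeof (struct mystuff), item);
 *	pool_free(pool, 0, NULL);	(retire the live extent; see above)
 *	pool_destroy(pool);
 *
 * An extent is only handed back to the heap once every byte in it has been
 * declared free, so mixing long-lived and short-lived objects in one pool
 * tends to keep extents pinned. */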

/* This allows you to declare that the given address marks the edge of some
 * pool memory that is no longer needed.  Any extents that hold only data
 * older than the boundary address are freed.  NOTE: You MUST NOT USE BOTH
 * pool_free() and pool_free_old() on the same pool!! */
pool_free_old(alloc_pool_t p, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev, *next;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}

	if (addr == PTR_ADD(cur->start, cur->free)) {
		if (prev) {
			prev->next = NULL;
			next = cur;
		} else {
			/* The most recent live extent can just be reset. */
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, pool->size - cur->free);
			cur->free = pool->size;
			cur->bound = 0;
			next = cur->next;
			cur->next = NULL;
		}
	} else {
		next = cur->next;
		cur->next = NULL;
	}

	while ((cur = next) != NULL) {
		next = cur->next;
		if (pool->flags & POOL_PREPEND)
			free(PTR_ADD(cur->start, -sizeof (struct pool_extent)));
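
/* Illustrative pattern (hypothetical names): remember a boundary, keep
 * allocating, then release everything that was allocated before it:
 *
 *	... pool_alloc() calls for batch one ...
 *	void *marker = pool_boundary(pool, 0);
 *	... pool_alloc() calls for batch two ...
 *	pool_free_old(pool, marker);	(batch one is no longer needed)
 *
 * Per the note above, only extents holding nothing newer than the marker are
 * actually returned to the heap. */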

/* If the current extent doesn't have "len" free space in it, mark it as full
 * so that the next alloc will start a new extent.  If len is (size_t)-1, this
 * bump will always occur.  The function returns a boundary address that can
 * be used with pool_free_old(), or a NULL if no memory is allocated. */
pool_boundary(alloc_pool_t p, size_t len)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur;

	if (!pool || !pool->extents)
		return NULL;

	cur = pool->extents;

	if (cur->free < len) {
		cur->bound += cur->free;
		cur->free = 0;
	}

	return PTR_ADD(cur->start, cur->free);
}
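
/* Illustrative note: passing (size_t)-1 forces the bump described above, so
 * the next allocation is guaranteed to start a brand-new extent, and a later
 * pool_free_old() on the returned address can release every extent filled
 * before the boundary.  Hypothetical sketch:
 *
 *	void *generation = pool_boundary(pool, (size_t)-1);
 *	... more pool_alloc() calls ...
 *	pool_free_old(pool, generation);
 */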

#define FDPRINT(label, value) \
	do { \
		int len = snprintf(buf, sizeof buf, label, value); \
		if (write(fd, buf, len) != len) \
			ret = -1; \
	} while (0)

#define FDEXTSTAT(ext) \
	do { \
		int len = snprintf(buf, sizeof buf, "  %12ld  %5ld\n", \
				   (long)ext->free, (long)ext->bound); \
		if (write(fd, buf, len) != len) \
			ret = -1; \
	} while (0)

pool_stats(alloc_pool_t p, int fd, int summarize)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur;

	FDPRINT("  Extent size:       %12ld\n", (long) pool->size);
	FDPRINT("  Alloc quantum:     %12ld\n", (long) pool->quantum);
	FDPRINT("  Extents created:   %12ld\n", pool->e_created);
	FDPRINT("  Extents freed:     %12ld\n", pool->e_freed);
	FDPRINT("  Alloc count:       %12.0f\n", (double) pool->n_allocated);
	FDPRINT("  Free Count:        %12.0f\n", (double) pool->n_freed);
	FDPRINT("  Bytes allocated:   %12.0f\n", (double) pool->b_allocated);
	FDPRINT("  Bytes freed:       %12.0f\n", (double) pool->b_freed);

	if (write(fd, "\n", 1) != 1)
		ret = -1;

	for (cur = pool->extents; cur; cur = cur->next)
		FDEXTSTAT(cur);