#define POOL_DEF_EXTENT	(32 * 1024)

#define POOL_QALIGN_P2	(1<<16)		/* power-of-2 qalign */
struct alloc_pool
{
	size_t size;				/* extent size */
	size_t quantum;				/* allocation quantum */
	struct pool_extent *extents;		/* top extent is "live" */
	void (*bomb)(const char*, const char*, int); /* called if malloc fails */
	int flags;

	/* statistical data */
	unsigned long e_created;		/* extents created */
	unsigned long e_freed;			/* extents destroyed */
	int64 n_allocated;			/* calls to alloc */
	int64 n_freed;				/* calls to free */
	int64 b_allocated;			/* cum. bytes allocated */
	int64 b_freed;				/* cum. bytes freed */
};
struct pool_extent
{
	struct pool_extent *next;
	void *start;				/* starting address */
	size_t free;				/* free bytecount */
	size_t bound;				/* trapped free bytes */
};
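/* A rough sketch of how an extent is used (based on the code below): memory
 * is handed out from the top down -- pool_alloc() subtracts the request from
 * "free" and returns start + free, so "free" is both the unused byte count
 * and the offset of the newest allocation.  Bytes freed out of order are only
 * counted in "bound"; the extent itself is recycled once free + bound covers
 * the whole extent.
 *
 *	start                      start+free                start+size
 *	  |<--------- free --------->| newest ..... oldest ..... |
 */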
#define MINALIGN	offsetof(struct align_test, bar)
/* Temporarily cast a void* var into a char* var when adding an offset (to
 * keep some compilers from complaining about the pointer arithmetic). */
#define PTR_ADD(b,o)	( (void*) ((char*)(b) + (o)) )
#define PTR_SUB(b,o)	( (void*) ((char*)(b) - (o)) )
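/* For example, PTR_ADD(cur->start, cur->free) expands to
 *	(void*) ((char*)(cur->start) + (cur->free))
 * i.e. the address cur->free bytes past the extent's base.  ISO C does not
 * define arithmetic on void pointers, hence the char* cast. */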
alloc_pool_t
pool_create(size_t size, size_t quantum, void (*bomb)(const char*, const char*, int), int flags)
{
	struct alloc_pool *pool;

	if ((MINALIGN & (MINALIGN - 1)) != 0) {
		(*bomb)("Compiler error: MINALIGN is not a power of 2", __FILE__, __LINE__);
	if (!(pool = new0(struct alloc_pool)))
		return NULL;

	if (!size)
		size = POOL_DEF_EXTENT;
	if (flags & POOL_INTERN) {
		if (size <= sizeof (struct pool_extent))
			size = quantum;
		else
			size -= sizeof (struct pool_extent);
		flags |= POOL_PREPEND;
	}
	if (quantum <= 1)
		flags = (flags | POOL_NO_QALIGN) & ~POOL_QALIGN_P2;
	else if (!(flags & POOL_NO_QALIGN)) {
		if (size % quantum)
			size += quantum - size % quantum;
		/* If quantum is a power of 2, we'll avoid using modulus. */
		if (!(quantum & (quantum - 1)))
			flags |= POOL_QALIGN_P2;
	}
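/* A worked example of the power-of-2 shortcut used here and below: when
 * quantum is a power of 2, (len & (quantum - 1)) equals len % quantum, so
 * rounding up needs only bit operations.  With quantum = 16 and len = 20:
 * 20 & 15 = 4, so len += 16 - 4 and len becomes 32, the next multiple of 16. */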
	pool->quantum = quantum;
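/* A minimal usage sketch (hypothetical caller; my_bomb and str are stand-ins,
 * and alloc_pool_t is assumed to be the opaque handle returned above):
 *
 *	alloc_pool_t strings = pool_create(32 * 1024, 8, my_bomb, POOL_CLEAR);
 *	char *s = pool_alloc(strings, strlen(str) + 1, "copy of str");
 *	strcpy(s, str);
 *	...
 *	pool_destroy(strings);		// releases every extent at once
 */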
void
pool_destroy(alloc_pool_t p)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur, *next;
	for (cur = pool->extents; cur; cur = next) {
		next = cur->next;
		if (pool->flags & POOL_PREPEND)
			free(PTR_SUB(cur->start, sizeof (struct pool_extent)));
void *
pool_alloc(alloc_pool_t p, size_t len, const char *bomb_msg)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	if (!len)
		len = pool->quantum;
	else if (pool->flags & POOL_QALIGN_P2) {
		if (len & (pool->quantum - 1))
			len += pool->quantum - (len & (pool->quantum - 1));
	} else if (!(pool->flags & POOL_NO_QALIGN)) {
		if (len % pool->quantum)
			len += pool->quantum - len % pool->quantum;
	}
	if (len > pool->size)
		goto bomb_out;

	if (!pool->extents || len > pool->extents->free) {
		void *start;
		size_t asize;
		struct pool_extent *ext;

		asize = pool->size;
		if (pool->flags & POOL_PREPEND)
			asize += sizeof (struct pool_extent);
		if (!(start = new_array(char, asize)))
			goto bomb_out;

		if (pool->flags & POOL_CLEAR)
			memset(start, 0, asize);
		if (pool->flags & POOL_PREPEND) {
			ext = start;
			start = PTR_ADD(start, sizeof (struct pool_extent));
		} else if (!(ext = new(struct pool_extent)))
			goto bomb_out;
		ext->start = start;
		ext->free = pool->size;
		ext->bound = 0;
		ext->next = pool->extents;
		pool->extents = ext;
	}
	pool->b_allocated += len;

	pool->extents->free -= len;

	return PTR_ADD(pool->extents->start, pool->extents->free);
  bomb_out:
	if (pool->bomb)
		(*pool->bomb)(bomb_msg, __FILE__, __LINE__);
	return NULL;
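/* A usage sketch (hypothetical caller): each call carves a quantum-aligned
 * chunk from the live extent, and bomb_msg is only used if the request cannot
 * be satisfied.  Because "free" shrinks from the top, consecutive allocations
 * sit next to each other, newest at the lower address.
 *
 *	struct entry *e = pool_alloc(entry_pool, sizeof *e, "entry struct");
 *	char *name = pool_alloc(entry_pool, name_len + 1, "entry name");
 */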
/* This function allows you to declare memory in the pool that you are done
 * using.  If you free all the memory in a pool's extent, that extent will
 * be freed. */
void
pool_free(alloc_pool_t p, size_t len, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev;
	if (!addr) {
		/* A NULL addr starts a fresh extent for new allocations. */
		if ((cur = pool->extents) != NULL && cur->free != pool->size) {
			cur->bound += cur->free;
			cur->free = 0;
		}
		return;
	}
	if (!len)
		len = pool->quantum;
	else if (pool->flags & POOL_QALIGN_P2) {
		if (len & (pool->quantum - 1))
			len += pool->quantum - (len & (pool->quantum - 1));
	} else if (!(pool->flags & POOL_NO_QALIGN)) {
		if (len % pool->quantum)
			len += pool->quantum - len % pool->quantum;
	}
	pool->b_freed += len;
	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	/* The "live" extent is kept ready for more allocations. */
	if (cur->free + cur->bound + len >= pool->size) {
		if (pool->flags & POOL_CLEAR) {
			memset(PTR_ADD(cur->start, cur->free), 0,
			       pool->size - cur->free);
		}
		cur->free = pool->size;
	} else if (addr == PTR_ADD(cur->start, cur->free)) {
		if (pool->flags & POOL_CLEAR)
			memset(addr, 0, len);
	if (cur->free + cur->bound >= pool->size) {
		prev->next = cur->next;
		if (pool->flags & POOL_PREPEND)
			free(PTR_SUB(cur->start, sizeof (struct pool_extent)));
	} else if (prev != pool->extents) {
		/* Move the extent to be the first non-live extent. */
		prev->next = cur->next;
		cur->next = pool->extents->next;
		pool->extents->next = cur;
	}
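/* A usage sketch (hypothetical caller): freeing newest-first lets the live
 * extent reclaim the space immediately, while out-of-order frees are merely
 * tracked in "bound" until a whole extent can be dropped.
 *
 *	char *a = pool_alloc(pool, 100, "a");
 *	char *b = pool_alloc(pool, 200, "b");
 *	pool_free(pool, 200, b);	// b was newest, so its bytes are reusable at once
 *	pool_free(pool, 100, a);
 *	pool_free(pool, 0, NULL);	// NULL addr: start a fresh extent for new allocations
 */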
/* This allows you to declare that the given address marks the edge of some
 * pool memory that is no longer needed.  Any extents that hold only data
 * older than the boundary address are freed.  NOTE: You MUST NOT USE BOTH
 * pool_free() and pool_free_old() on the same pool!! */
void
pool_free_old(alloc_pool_t p, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev, *next;
	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (addr == PTR_ADD(cur->start, cur->free)) {
		/* The most recent live extent can just be reset. */
		if (pool->flags & POOL_CLEAR)
			memset(addr, 0, pool->size - cur->free);
		cur->free = pool->size;
	while ((cur = next) != NULL) {
		next = cur->next;
		if (pool->flags & POOL_PREPEND)
			free(PTR_SUB(cur->start, sizeof (struct pool_extent)));
/* If the current extent doesn't have "len" free space in it, mark it as full
 * so that the next alloc will start a new extent.  If len is (size_t)-1, this
 * bump will always occur.  The function returns a boundary address that can
 * be used with pool_free_old(), or a NULL if no memory is allocated. */
void *
pool_boundary(alloc_pool_t p, size_t len)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur;
	if (!pool || !pool->extents)
		return NULL;

	cur = pool->extents;

	if (cur->free < len) {
		cur->bound += cur->free;
		cur->free = 0;
	}

	return PTR_ADD(cur->start, cur->free);
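/* A usage sketch (hypothetical caller) of the boundary interface: take a
 * boundary before building a new batch of allocations, then pass it back once
 * everything allocated before that point is no longer needed.
 *
 *	void *edge = pool_boundary(pool, 8 * 1024);	// new batch starts here
 *	...allocate the batch with pool_alloc()...
 *	pool_free_old(pool, edge);	// drop extents holding only pre-batch data
 *
 * Per the NOTE above, a given pool must use either pool_free() or
 * pool_free_old(), never both. */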
#define FDPRINT(label, value) \
	int len = snprintf(buf, sizeof buf, label, value); \
	if (write(fd, buf, len) != len) \

#define FDEXTSTAT(ext) \
	int len = snprintf(buf, sizeof buf, " %12ld %5ld\n", \
			   (long)ext->free, (long)ext->bound); \
	if (write(fd, buf, len) != len) \
pool_stats(alloc_pool_t p, int fd, int summarize)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur;
	char buf[BUFSIZ];
	FDPRINT("  Extent size:       %12ld\n", (long) pool->size);
	FDPRINT("  Alloc quantum:     %12ld\n", (long) pool->quantum);
	FDPRINT("  Extents created:   %12ld\n", pool->e_created);
	FDPRINT("  Extents freed:     %12ld\n", pool->e_freed);
	FDPRINT("  Alloc count:       %12.0f\n", (double) pool->n_allocated);
	FDPRINT("  Free Count:        %12.0f\n", (double) pool->n_freed);
	FDPRINT("  Bytes allocated:   %12.0f\n", (double) pool->b_allocated);
	FDPRINT("  Bytes freed:       %12.0f\n", (double) pool->b_freed);
	if (write(fd, "\n", 1) != 1)
	for (cur = pool->extents; cur; cur = cur->next)
		FDEXTSTAT(cur);
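/* A usage sketch (hypothetical caller), assuming a nonzero "summarize"
 * argument skips the per-extent listing produced by the loop above:
 *
 *	pool_stats(pool, 2, 1);		// write the counters to stderr
 *	pool_stats(pool, 2, 0);		// also list free/bound for each extent
 */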