/*
 * Copyright (c) 2000-2004 Sendmail, Inc. and its suppliers.
 *	All rights reserved.
 *
 * By using this file, you agree to the terms and conditions set
 * forth in the LICENSE file which can be found at the top level of
 * the sendmail distribution.
 */
10 #pragma ident "%Z%%M% %I% %E% SMI"
13 SM_RCSID("@(#)$Id: rpool.c,v 1.28 2004/08/03 20:44:04 ca Exp $")
17 ** For documentation, see rpool.html
23 #include <sm/varargs.h>
27 #endif /* _FFR_PERF_RPOOL */
/* Magic cookie stored in each rpool; SM_REQUIRE_ISA checks against it. */
const char SmRpoolMagic[] = "sm_rpool";
34 char align
[SM_ALIGN_SIZE
];
37 static char *sm_rpool_allocblock_x
__P((SM_RPOOL_T
*, size_t));
38 static char *sm_rpool_allocblock
__P((SM_RPOOL_T
*, size_t));
45 #define BIG_OBJECT_RATIO 10
48 ** SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
51 ** rpool -- rpool to which the block should be added.
52 ** size -- size of block.
58 ** F:sm_heap -- out of memory
62 sm_rpool_allocblock_x(rpool
, size
)
68 p
= sm_malloc_x(sizeof(SM_POOLHDR_T
) + size
);
69 p
->sm_pnext
= rpool
->sm_pools
;
71 return (char*) p
+ sizeof(SM_POOLHDR_T
);
75 ** SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
78 ** rpool -- rpool to which the block should be added.
79 ** size -- size of block.
82 ** Pointer to block, NULL on failure.
86 sm_rpool_allocblock(rpool
, size
)
92 p
= sm_malloc(sizeof(SM_POOLHDR_T
) + size
);
95 p
->sm_pnext
= rpool
->sm_pools
;
97 return (char*) p
+ sizeof(SM_POOLHDR_T
);
101 ** SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
104 ** rpool -- rpool from which memory should be allocated;
105 ** can be NULL, use sm_malloc() then.
106 ** size -- size of block.
108 ** line -- line number in file.
109 ** group -- heap group for debugging.
115 ** F:sm_heap -- out of memory
118 ** if size == 0 and the rpool is new (no memory
119 ** allocated yet) NULL is returned!
120 ** We could solve this by
121 ** - wasting 1 byte (size < avail)
122 ** - checking for rpool->sm_poolptr != NULL
123 ** - not asking for 0 sized buffer
128 sm_rpool_malloc_tagged_x(rpool
, size
, file
, line
, group
)
134 #else /* SM_HEAP_CHECK */
135 sm_rpool_malloc_x(rpool
, size
)
138 #endif /* SM_HEAP_CHECK */
143 return sm_malloc_tagged_x(size
, file
, line
, group
);
145 /* Ensure that size is properly aligned. */
146 if (size
& SM_ALIGN_BITS
)
147 size
= (size
& ~SM_ALIGN_BITS
) + SM_ALIGN_SIZE
;
149 /* The common case. This is optimized for speed. */
150 if (size
<= rpool
->sm_poolavail
)
152 ptr
= rpool
->sm_poolptr
;
153 rpool
->sm_poolptr
+= size
;
154 rpool
->sm_poolavail
-= size
;
159 ** The slow case: we need to call malloc.
160 ** The SM_REQUIRE assertion is deferred until now, for speed.
161 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
162 ** so the common case code won't be triggered on a dangling pointer.
165 SM_REQUIRE(rpool
->sm_magic
== SmRpoolMagic
);
168 ** If size > sm_poolsize, then malloc a new block especially for
169 ** this request. Future requests will be allocated from the
172 ** What if the current pool is mostly unallocated, and the current
173 ** request is larger than the available space, but < sm_poolsize?
174 ** If we discard the current pool, and start allocating from a new
175 ** pool, then we will be wasting a lot of space. For this reason,
176 ** we malloc a block just for the current request if size >
177 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
178 ** Thus, the most space that we will waste at the end of a pool
179 ** is sm_bigobjectsize - 1.
182 if (size
> rpool
->sm_bigobjectsize
)
185 ++rpool
->sm_nbigblocks
;
186 #endif /* _FFR_PERF_RPOOL */
187 return sm_rpool_allocblock_x(rpool
, size
);
189 SM_ASSERT(rpool
->sm_bigobjectsize
<= rpool
->sm_poolsize
);
190 ptr
= sm_rpool_allocblock_x(rpool
, rpool
->sm_poolsize
);
191 rpool
->sm_poolptr
= ptr
+ size
;
192 rpool
->sm_poolavail
= rpool
->sm_poolsize
- size
;
195 #endif /* _FFR_PERF_RPOOL */
200 ** SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
203 ** rpool -- rpool from which memory should be allocated;
204 ** can be NULL, use sm_malloc() then.
205 ** size -- size of block.
207 ** line -- line number in file.
208 ** group -- heap group for debugging.
211 ** Pointer to block, NULL on failure.
214 ** if size == 0 and the rpool is new (no memory
215 ** allocated yet) NULL is returned!
216 ** We could solve this by
217 ** - wasting 1 byte (size < avail)
218 ** - checking for rpool->sm_poolptr != NULL
219 ** - not asking for 0 sized buffer
224 sm_rpool_malloc_tagged(rpool
, size
, file
, line
, group
)
230 #else /* SM_HEAP_CHECK */
231 sm_rpool_malloc(rpool
, size
)
234 #endif /* SM_HEAP_CHECK */
239 return sm_malloc_tagged(size
, file
, line
, group
);
241 /* Ensure that size is properly aligned. */
242 if (size
& SM_ALIGN_BITS
)
243 size
= (size
& ~SM_ALIGN_BITS
) + SM_ALIGN_SIZE
;
245 /* The common case. This is optimized for speed. */
246 if (size
<= rpool
->sm_poolavail
)
248 ptr
= rpool
->sm_poolptr
;
249 rpool
->sm_poolptr
+= size
;
250 rpool
->sm_poolavail
-= size
;
255 ** The slow case: we need to call malloc.
256 ** The SM_REQUIRE assertion is deferred until now, for speed.
257 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
258 ** so the common case code won't be triggered on a dangling pointer.
261 SM_REQUIRE(rpool
->sm_magic
== SmRpoolMagic
);
264 ** If size > sm_poolsize, then malloc a new block especially for
265 ** this request. Future requests will be allocated from the
268 ** What if the current pool is mostly unallocated, and the current
269 ** request is larger than the available space, but < sm_poolsize?
270 ** If we discard the current pool, and start allocating from a new
271 ** pool, then we will be wasting a lot of space. For this reason,
272 ** we malloc a block just for the current request if size >
273 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
274 ** Thus, the most space that we will waste at the end of a pool
275 ** is sm_bigobjectsize - 1.
278 if (size
> rpool
->sm_bigobjectsize
)
281 ++rpool
->sm_nbigblocks
;
282 #endif /* _FFR_PERF_RPOOL */
283 return sm_rpool_allocblock(rpool
, size
);
285 SM_ASSERT(rpool
->sm_bigobjectsize
<= rpool
->sm_poolsize
);
286 ptr
= sm_rpool_allocblock(rpool
, rpool
->sm_poolsize
);
289 rpool
->sm_poolptr
= ptr
+ size
;
290 rpool
->sm_poolavail
= rpool
->sm_poolsize
- size
;
293 #endif /* _FFR_PERF_RPOOL */
298 ** SM_RPOOL_NEW_X -- create a new rpool.
301 ** parent -- pointer to parent rpool, can be NULL.
304 ** Pointer to new rpool.
308 sm_rpool_new_x(parent
)
313 rpool
= sm_malloc_x(sizeof(SM_RPOOL_T
));
315 rpool
->sm_parentlink
= NULL
;
319 rpool
->sm_parentlink
= sm_rpool_attach_x(parent
,
320 (SM_RPOOL_RFREE_T
) sm_rpool_free
,
327 rpool
->sm_magic
= SmRpoolMagic
;
329 rpool
->sm_poolsize
= POOLSIZE
- sizeof(SM_POOLHDR_T
);
330 rpool
->sm_bigobjectsize
= rpool
->sm_poolsize
/ BIG_OBJECT_RATIO
;
331 rpool
->sm_poolptr
= NULL
;
332 rpool
->sm_poolavail
= 0;
333 rpool
->sm_pools
= NULL
;
335 rpool
->sm_rptr
= NULL
;
336 rpool
->sm_ravail
= 0;
337 rpool
->sm_rlists
= NULL
;
339 rpool
->sm_nbigblocks
= 0;
340 rpool
->sm_npools
= 0;
341 #endif /* _FFR_PERF_RPOOL */
347 ** SM_RPOOL_SETSIZES -- set sizes for rpool.
350 ** poolsize -- size of a single rpool block.
351 ** bigobjectsize -- if this size is exceeded, an individual
352 ** block is allocated (must be less or equal poolsize).
359 sm_rpool_setsizes(rpool
, poolsize
, bigobjectsize
)
362 size_t bigobjectsize
;
364 SM_REQUIRE(poolsize
>= bigobjectsize
);
366 poolsize
= POOLSIZE
- sizeof(SM_POOLHDR_T
);
367 if (bigobjectsize
== 0)
368 bigobjectsize
= poolsize
/ BIG_OBJECT_RATIO
;
369 rpool
->sm_poolsize
= poolsize
;
370 rpool
->sm_bigobjectsize
= bigobjectsize
;
374 ** SM_RPOOL_FREE -- free an rpool and release all of its resources.
377 ** rpool -- rpool to free.
387 SM_RLIST_T
*rl
, *rnext
;
388 SM_RESOURCE_T
*r
, *rmax
;
389 SM_POOLLINK_T
*pp
, *pnext
;
395 ** It's important to free the resources before the memory pools,
396 ** because the resource free functions might modify the contents
397 ** of the memory pools.
400 rl
= rpool
->sm_rlists
;
403 rmax
= rpool
->sm_rptr
;
406 for (r
= rl
->sm_rvec
; r
< rmax
; ++r
)
408 if (r
->sm_rfree
!= NULL
)
409 r
->sm_rfree(r
->sm_rcontext
);
411 rnext
= rl
->sm_rnext
;
416 rmax
= &rl
->sm_rvec
[SM_RLIST_MAX
];
421 ** Now free the memory pools.
424 for (pp
= rpool
->sm_pools
; pp
!= NULL
; pp
= pnext
)
426 pnext
= pp
->sm_pnext
;
431 ** Disconnect rpool from its parent.
434 if (rpool
->sm_parentlink
!= NULL
)
435 *rpool
->sm_parentlink
= NULL
;
438 ** Setting these fields to zero means that any future to attempt
439 ** to use the rpool after it is freed will cause an assertion failure.
442 rpool
->sm_magic
= NULL
;
443 rpool
->sm_poolavail
= 0;
444 rpool
->sm_ravail
= 0;
447 if (rpool
->sm_nbigblocks
> 0 || rpool
->sm_npools
> 1)
449 "perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
450 (long) rpool
, rpool
->sm_nbigblocks
, rpool
->sm_npools
);
451 rpool
->sm_nbigblocks
= 0;
452 rpool
->sm_npools
= 0;
453 #endif /* _FFR_PERF_RPOOL */
458 ** SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
461 ** rpool -- rpool to which resource should be attached.
462 ** rfree -- function to call when rpool is freed.
463 ** rcontext -- argument for function to call when rpool is freed.
466 ** Pointer to allocated function.
469 ** F:sm_heap -- out of memory
473 sm_rpool_attach_x(rpool
, rfree
, rcontext
)
475 SM_RPOOL_RFREE_T rfree
;
481 SM_REQUIRE_ISA(rpool
, SmRpoolMagic
);
483 if (rpool
->sm_ravail
== 0)
485 rl
= sm_malloc_x(sizeof(SM_RLIST_T
));
486 rl
->sm_rnext
= rpool
->sm_rlists
;
487 rpool
->sm_rlists
= rl
;
488 rpool
->sm_rptr
= rl
->sm_rvec
;
489 rpool
->sm_ravail
= SM_RLIST_MAX
;
492 a
= &rpool
->sm_rptr
->sm_rfree
;
493 rpool
->sm_rptr
->sm_rfree
= rfree
;
494 rpool
->sm_rptr
->sm_rcontext
= rcontext
;
#if DO_NOT_USE_STRCPY
/*
**  SM_RPOOL_STRDUP_X -- Create a copy of a C string
**
**	Parameters:
**		rpool -- rpool to use.
**		s -- the string to copy.
**
**	Returns:
**		pointer to newly allocated string.
**
**	Exceptions:
**		F:sm_heap -- out of memory
*/

char *
sm_rpool_strdup_x(rpool, s)
	SM_RPOOL_T *rpool;
	const char *s;
{
	size_t l;
	char *n;

	l = strlen(s);
	SM_ASSERT(l + 1 > l);	/* guard against size_t overflow of l + 1 */
	n = sm_rpool_malloc_x(rpool, l + 1);
	sm_strlcpy(n, s, l + 1);
	return n;
}
#endif /* DO_NOT_USE_STRCPY */