/*
 * Copyright (c) 2005 by Internet Systems Consortium, Inc. ("ISC")
 * Copyright (c) 1997,1999 by Internet Software Consortium.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* When this symbol is defined allocations via memget are made slightly
   bigger and some debugging info stuck before and after the region given
   back to the caller. */
/* #define DEBUGGING_MEMCLUSTER */
#define MEMCLUSTER_ATEND
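/*
 * Illustrative layout sketch (added for exposition, not part of the
 * original file): with DEBUGGING_MEMCLUSTER defined, each region handed
 * out by memget() looks roughly like
 *
 *	[ memcluster_element header | caller's "size" bytes | fence_t ]
 *
 * The header records the requested size and a FRONT_FENCEPOST word, and a
 * BACK_FENCEPOST word is copied just past the caller's region, so that
 * memput() can detect writes outside the returned buffer.
 */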
#if !defined(LINT) && !defined(CODECENTER)
static const char rcsid[] = "Id: memcluster.c,v 1.11 2006/08/30 23:34:38 marka Exp";
#endif

#include "port_before.h"

#include <sys/types.h>
#include <sys/param.h>

#include <netinet/in.h>
#include <arpa/inet.h>
#include <arpa/nameser.h>

#include <isc/memcluster.h>
#include <isc/assertions.h>

#include "port_after.h"
#ifdef MEMCLUSTER_RECORD
#ifndef DEBUGGING_MEMCLUSTER
#define DEBUGGING_MEMCLUSTER
#endif
#endif

#define DEF_MAX_SIZE		1100
#define DEF_MEM_TARGET		4096

typedef u_int32_t fence_t;
typedef struct memcluster_element {
	struct memcluster_element *	next;
#if defined(DEBUGGING_MEMCLUSTER)
#if defined(MEMCLUSTER_RECORD)
	const char *			file;
	int				line;
#endif
	size_t				size;
	fence_t				fencepost;
#endif
} memcluster_element;

#define SMALL_SIZE_LIMIT	sizeof(memcluster_element)
#define P_SIZE			sizeof(void *)
#define FRONT_FENCEPOST		0xfebafeba
#define BACK_FENCEPOST		0xabefabef
#define FENCEPOST_SIZE		4

#ifndef MEMCLUSTER_LITTLE_MALLOC
#define MEMCLUSTER_BIG_MALLOC 1
#define NUM_BASIC_BLOCKS 64
#endif
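/*
 * Illustrative note (added for exposition, not part of the original file):
 * with MEMCLUSTER_BIG_MALLOC in effect, a fresh chunk of
 * NUM_BASIC_BLOCKS * mem_target bytes is obtained at a time, which is
 * 64 * 4096 = 256 KiB with the defaults above, and is then parcelled out
 * as mem_target-sized basic blocks.
 */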
struct stats {
	u_long			gets;
	u_long			totalgets;
	u_long			blocks;
	u_long			freefrags;
};

#ifdef DO_PTHREADS
static pthread_mutex_t memlock = PTHREAD_MUTEX_INITIALIZER;
#define MEMLOCK		(void)pthread_mutex_lock(&memlock)
#define MEMUNLOCK	(void)pthread_mutex_unlock(&memlock)
#else
/*
 * Catch bad lock usage in non threaded build.
 */
static unsigned int memlock = 0;
#define MEMLOCK		do { INSIST(memlock == 0); memlock = 1; } while (0)
#define MEMUNLOCK	do { INSIST(memlock == 1); memlock = 0; } while (0)
#endif /* DO_PTHREADS */
static size_t max_size;
static size_t mem_target;
#ifndef MEMCLUSTER_BIG_MALLOC
static size_t mem_target_half;
static size_t mem_target_fudge;
#endif
static memcluster_element **freelists;
#ifdef MEMCLUSTER_RECORD
static memcluster_element **activelists;
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
static memcluster_element *basic_blocks;
#endif
static struct stats *stats;

static size_t quantize(size_t);
#if defined(DEBUGGING_MEMCLUSTER)
static void check(unsigned char *, int, size_t);
#endif
int
meminit(size_t init_max_size, size_t target_size) {
#if defined(DEBUGGING_MEMCLUSTER)
	INSIST(sizeof(fence_t) == FENCEPOST_SIZE);
#endif
	if (freelists != NULL) {
		errno = EEXIST;
		return (-1);
	}
	if (init_max_size == 0U)
		max_size = DEF_MAX_SIZE;
	else
		max_size = init_max_size;
	if (target_size == 0U)
		mem_target = DEF_MEM_TARGET;
	else
		mem_target = target_size;
#ifndef MEMCLUSTER_BIG_MALLOC
	mem_target_half = mem_target / 2;
	mem_target_fudge = mem_target + mem_target / 4;
#endif
	freelists = malloc(max_size * sizeof (memcluster_element *));
	stats = malloc((max_size+1) * sizeof (struct stats));
	if (freelists == NULL || stats == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(freelists, 0,
	       max_size * sizeof (memcluster_element *));
	memset(stats, 0, (max_size + 1) * sizeof (struct stats));
#ifdef MEMCLUSTER_RECORD
	activelists = malloc((max_size + 1) * sizeof (memcluster_element *));
	if (activelists == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	memset(activelists, 0,
	       (max_size + 1) * sizeof (memcluster_element *));
#endif
#ifdef MEMCLUSTER_BIG_MALLOC
	basic_blocks = NULL;
#endif
	return (0);
}
void *
__memget(size_t size) {
	return (__memget_record(size, NULL, 0));
}
void *
__memget_record(size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined(DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	char *p;
	fence_t fp = BACK_FENCEPOST;
#endif
	void *ret;
#if !defined(MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif

	if (freelists == NULL) {
		if (meminit(0, 0) == -1) {
			return (NULL);
		}
	}
	if (size >= max_size || new_size >= max_size) {
		/* memget() was called on something beyond our upper limit. */
		stats[max_size].gets++;
		stats[max_size].totalgets++;
#if defined(DEBUGGING_MEMCLUSTER)
		e = malloc(new_size);
#ifdef MEMCLUSTER_RECORD
		e->file = file;
		e->line = line;
		e->next = activelists[max_size];
		activelists[max_size] = e;
#endif
		e->fencepost = FRONT_FENCEPOST;
		p = (char *)e + sizeof *e + size;
		memcpy(p, &fp, sizeof fp);
		return ((char *)e + sizeof *e);
#else
		return (malloc(size));
#endif
	}
	/*
	 * If there are no blocks in the free list for this size, get a chunk
	 * of memory and then break it up into "new_size"-sized blocks, adding
	 * them to the free list.
	 */
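	/*
	 * Illustrative arithmetic (added for exposition, not part of the
	 * original file): with the default mem_target of 4096 and a
	 * rounded-up request of, say, 64 bytes, total_size / new_size
	 * yields 4096 / 64 = 64 fragments carved out of one chunk and
	 * threaded onto freelists[new_size].
	 */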
	if (freelists[new_size] == NULL) {
		int i, frags;
		size_t total_size;
		void *new;
		char *curr, *next;

#ifdef MEMCLUSTER_BIG_MALLOC
		if (basic_blocks == NULL) {
			new = malloc(NUM_BASIC_BLOCKS * mem_target);
			curr = new;
			next = curr + mem_target;
			for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
				((memcluster_element *)curr)->next = next;
				curr = next;
				next += mem_target;
			}
			/*
			 * curr is now pointing at the last block in the
			 * array.
			 */
			((memcluster_element *)curr)->next = NULL;
			basic_blocks = new;
		}
		total_size = mem_target;
		new = basic_blocks;
		basic_blocks = basic_blocks->next;
#else
		if (new_size > mem_target_half)
			total_size = mem_target_fudge;
		else
			total_size = mem_target;
		new = malloc(total_size);
#endif
		frags = total_size / new_size;
		stats[new_size].blocks++;
		stats[new_size].freefrags += frags;
		/* Set up a linked-list of blocks of size "new_size". */
		curr = new;
		next = curr + new_size;
		for (i = 0; i < (frags - 1); i++) {
#if defined (DEBUGGING_MEMCLUSTER)
			memset(curr, 0xa5, new_size);
#endif
			((memcluster_element *)curr)->next = next;
			curr = next;
			next += new_size;
		}
		/* curr is now pointing at the last block in the array. */
#if defined (DEBUGGING_MEMCLUSTER)
		memset(curr, 0xa5, new_size);
#endif
		((memcluster_element *)curr)->next = freelists[new_size];
		freelists[new_size] = new;
	}
	/* The free list uses the "rounded-up" size "new_size". */
#if defined (DEBUGGING_MEMCLUSTER)
	e = freelists[new_size];
	ret = (char *)e + sizeof *e;
	/*
	 * Check to see if this buffer has been written to while on free list.
	 */
	check(ret, 0xa5, new_size - sizeof *e);
	/*
	 * Mark memory we are returning.
	 */
	memset(ret, 0xe5, size);
#else
	ret = freelists[new_size];
#endif
	freelists[new_size] = freelists[new_size]->next;
#if defined(DEBUGGING_MEMCLUSTER)
	e->size = size;
	e->fencepost = FRONT_FENCEPOST;
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
	e->next = activelists[size];
	activelists[size] = e;
#endif
	p = (char *)e + sizeof *e + size;
	memcpy(p, &fp, sizeof fp);
#endif

	/*
	 * The stats[] uses the _actual_ "size" requested by the
	 * caller, with the caveat (in the code above) that "size" >= the
	 * max. size (max_size) ends up getting recorded as a call to
	 * max_size.
	 */
	stats[size].gets++;
	stats[size].totalgets++;
	stats[new_size].freefrags--;

#if defined(DEBUGGING_MEMCLUSTER)
	return ((char *)e + sizeof *e);
#else
	return (ret);
#endif
}
/*%
 * This is a call from an external caller,
 * so we want to count this as a user "put".
 */
void
__memput(void *mem, size_t size) {
	__memput_record(mem, size, NULL, 0);
}
void
__memput_record(void *mem, size_t size, const char *file, int line) {
	size_t new_size = quantize(size);
#if defined (DEBUGGING_MEMCLUSTER)
	memcluster_element *e;
	memcluster_element *el;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *prev;
#endif
	fence_t fp;
	char *p;
#endif

#if !defined (MEMCLUSTER_RECORD)
	UNUSED(file);
	UNUSED(line);
#endif

	REQUIRE(freelists != NULL);
#if defined (DEBUGGING_MEMCLUSTER)
	e = (memcluster_element *) ((char *)mem - sizeof *e);
	INSIST(e->fencepost == FRONT_FENCEPOST);
	INSIST(e->size == size);
	p = (char *)e + sizeof *e + size;
	memcpy(&fp, p, sizeof fp);
	INSIST(fp == BACK_FENCEPOST);
	INSIST(((u_long)mem % 4) == 0);
#ifdef MEMCLUSTER_RECORD
	prev = NULL;
	if (size == max_size || new_size >= max_size)
		el = activelists[max_size];
	else
		el = activelists[size];
	while (el != NULL && el != e) {
		prev = el;
		el = el->next;
	}
	INSIST(el != NULL);	/*%< double free */
	if (prev == NULL) {
		if (size == max_size || new_size >= max_size)
			activelists[max_size] = el->next;
		else
			activelists[size] = el->next;
	} else
		prev->next = el->next;
#endif

	if (size == max_size || new_size >= max_size) {
		/* memput() called on something beyond our upper limit */
#if defined(DEBUGGING_MEMCLUSTER)
		free(e);
#else
		free(mem);
#endif
		INSIST(stats[max_size].gets != 0U);
		stats[max_size].gets--;
		return;
	}
	/* The free list uses the "rounded-up" size "new_size": */
#if defined(DEBUGGING_MEMCLUSTER)
	memset(mem, 0xa5, new_size - sizeof *e); /*%< catch write after free */
	e->size = 0;	/*%< catch double memput() */
#ifdef MEMCLUSTER_RECORD
	e->file = file;
	e->line = line;
#endif
#ifdef MEMCLUSTER_ATEND
	e->next = NULL;
	el = freelists[new_size];
	while (el != NULL && el->next != NULL)
		el = el->next;
	if (el != NULL)
		el->next = e;
	else
		freelists[new_size] = e;
#else
	e->next = freelists[new_size];
	freelists[new_size] = (void *)e;
#endif
#else
	((memcluster_element *)mem)->next = freelists[new_size];
	freelists[new_size] = (memcluster_element *)mem;
#endif

	/*
	 * The stats[] uses the _actual_ "size" requested by the
	 * caller, with the caveat (in the code above) that "size" >= the
	 * max. size (max_size) ends up getting recorded as a call to
	 * max_size.
	 */
	INSIST(stats[size].gets != 0U);
	stats[size].gets--;
	stats[new_size].freefrags++;
}
void *
__memget_debug(size_t size, const char *file, int line) {
	void *ptr;

	ptr = __memget_record(size, file, line);
	fprintf(stderr, "%s:%d: memget(%lu) -> %p\n", file, line,
		(u_long)size, ptr);
	return (ptr);
}
void
__memput_debug(void *ptr, size_t size, const char *file, int line) {
	fprintf(stderr, "%s:%d: memput(%p, %lu)\n", file, line, ptr,
		(u_long)size);
	__memput_record(ptr, size, file, line);
}
/*%
 * Print the stats[] on the stream "out" with suitable formatting.
 */
void
memstats(FILE *out) {
	size_t i;
#ifdef MEMCLUSTER_RECORD
	memcluster_element *e;
#endif

	if (freelists == NULL) {
		return;
	}
	for (i = 1; i <= max_size; i++) {
		const struct stats *s = &stats[i];

		if (s->totalgets == 0U && s->gets == 0U)
			continue;
		fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
			(i == max_size) ? ">=" : " ",
			(unsigned long)i, s->totalgets, s->gets);
		fprintf(out, " (%lu bl, %lu ff)",
			s->blocks, s->freefrags);
	}
#ifdef MEMCLUSTER_RECORD
	fprintf(out, "Active Memory:\n");
	for (i = 1; i <= max_size; i++) {
		if ((e = activelists[i]) != NULL)
			while (e != NULL) {
				fprintf(out, "%s:%d %p:%lu\n",
					e->file != NULL ? e->file :
						"<UNKNOWN>", e->line,
					(char *)e + sizeof *e,
					(u_long)e->size);
				e = e->next;
			}
	}
#endif
}
int
memactive(void) {
	size_t i;

	for (i = 1; i <= max_size; i++)
		if (stats[i].gets != 0U)
			return (1);
	return (0);
}
/*%
 * Round up size to a multiple of sizeof(void *).  This guarantees that a
 * block is at least sizeof void *, and that we won't violate alignment
 * restrictions, both of which are needed to make lists of blocks.
 */
static size_t
quantize(size_t size) {
	int remainder;

	/*
	 * If there is no remainder for the integer division of
	 *
	 *	(size / P_SIZE)
	 *
	 * then we already have a good size; if not, then we need
	 * to round up the result in order to get a size big
	 * enough to satisfy the request _and_ aligned on P_SIZE boundaries.
	 */
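	/*
	 * Worked example (added for exposition, not part of the original
	 * file): on a platform where sizeof(void *) is 8, a request of 13
	 * bytes leaves a remainder of 5, so the size is bumped to
	 * 13 + (8 - 5) = 16; with DEBUGGING_MEMCLUSTER defined the debug
	 * overhead below is added on top of that.
	 */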
	remainder = size % P_SIZE;
	if (remainder != 0)
		size += P_SIZE - remainder;
#if defined(DEBUGGING_MEMCLUSTER)
	return (size + SMALL_SIZE_LIMIT + sizeof (int));
#else
	return (size);
#endif
}
#if defined(DEBUGGING_MEMCLUSTER)
static void
check(unsigned char *a, int value, size_t len) {
	size_t i;

	for (i = 0; i < len; i++)
		INSIST(a[i] == value);
}
#endif