/*
 * Electric Fence - Red-Zone memory allocator.
 * Bruce Perens, 1988, 1993
 *
 * This is a special version of malloc() and company for debugging software
 * that is suspected of overrunning or underrunning the boundaries of a
 * malloc buffer, or touching free memory.
 *
 * It arranges for each malloc buffer to be followed (or preceded)
 * in the address space by an inaccessible virtual memory page,
 * and for free memory to be inaccessible. If software touches the
 * inaccessible page, it will get an immediate segmentation
 * fault. It is then trivial to uncover the offending code using a debugger.
 *
 * An advantage of this product over most malloc debuggers is that this one
 * detects reading out of bounds as well as writing, and this one stops on
 * the exact instruction that causes the error, rather than waiting until the
 * next boundary check.
 *
 * There is one product that debugs malloc buffer overruns
 * better than Electric Fence: "Purify" from Purify Systems, and that's only
 * a small part of what Purify does. I'm not affiliated with Purify, I just
 * respect a job well done.
 *
 * This version of malloc() should not be linked into production software,
 * since it tremendously increases the time and memory overhead of malloc().
 * Each malloc buffer will consume a minimum of two virtual memory pages,
 * which is 16 kilobytes on many systems. On some systems it will be necessary
 * to increase the amount of swap space in order to debug large programs that
 * perform lots of allocation, because of the per-buffer overhead.
 */
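/*
 * Illustrative sketch (not part of the allocator, names are hypothetical):
 * the kind of one-byte overrun that Electric Fence turns into an immediate,
 * easily-debugged fault.
 *
 *	char * buffer = malloc(16);
 *	buffer[16] = '\0';	<-- one byte past the end lands on the
 *				    inaccessible page: SIGSEGV right here
 *	free(buffer);
 */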
static const char version[] = "\n Electric Fence 2.0.5 Copyright (C) 1987-1995 Bruce Perens.\n";
/*
 * MEMORY_CREATION_SIZE is the amount of memory to get from the operating
 * system at one time. We'll break that memory down into smaller pieces for
 * malloc buffers. One megabyte is probably a good value.
 */
#define MEMORY_CREATION_SIZE	(1024 * 1024)
/*
 * Enum Mode indicates the status of a malloc buffer.
 */
enum _Mode {
	NOT_IN_USE = 0,	/* Available to represent a malloc buffer. */
	FREE,		/* A free buffer. */
	ALLOCATED,	/* A buffer that is in use. */
	PROTECTED,	/* A freed buffer that cannot be allocated again. */
	INTERNAL_USE	/* A buffer used internally by malloc(). */
};
typedef enum _Mode Mode;
/*
 * Struct Slot contains all of the information about a malloc buffer except
 * for the contents of its memory.
 */
struct _Slot {
	void *		userAddress;
	void *		internalAddress;
	size_t		userSize;
	size_t		internalSize;
	Mode		mode;
};
typedef struct _Slot Slot;
/*
 * EF_ALIGNMENT is a global variable used to control the default alignment
 * of buffers returned by malloc(), calloc(), and realloc(). It is all-caps
 * so that its name matches the name of the environment variable that is used
 * to set it. This gives the programmer one less name to remember.
 * If the value is -1, it will be set from the environment or to sizeof(int)
 * at run-time.
 */
int EF_ALIGNMENT = -1;
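/*
 * Worked example (illustrative, not part of the allocator): with the
 * default EF_ALIGNMENT of sizeof(int) == 4, a request such as
 *
 *	char * p = malloc(5);
 *
 * is padded to 8 bytes so that the returned address stays word-aligned.
 * Overruns into bytes p[5] through p[7] then go undetected, while p[8]
 * faults. Running the program with the environment variable set, e.g.
 * EF_ALIGNMENT=1, removes the padding and catches p[5] immediately, at
 * the cost of returning unaligned addresses for odd sizes.
 */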
/*
 * EF_PROTECT_FREE is a global variable used to control the disposition of
 * memory that is released using free(). It is all-caps so that its name
 * matches the name of the environment variable that is used to set it.
 * If its value is non-zero, memory released by free() is made
 * inaccessible and never allocated again. Any software that touches free
 * memory will then get a segmentation fault. If its value is zero, freed
 * memory will be available for reallocation, but will still be inaccessible
 * until it is reallocated.
 * If the value is -1, it will be set from the environment or to 0 at run-time.
 */
int EF_PROTECT_FREE = -1;
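/*
 * Illustrative sketch (hypothetical names, not part of the allocator):
 * a use-after-free that this setting is meant to expose.
 *
 *	char * p = malloc(32);
 *	free(p);
 *	p[0] = 'x';	<-- freed memory is access-denied, so this faults
 *
 * With EF_PROTECT_FREE set, the pages stay inaccessible for the life of
 * the process, so even a much later reallocation cannot hide the error.
 */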
/*
 * EF_PROTECT_BELOW is used to modify the behavior of the allocator. When
 * its value is non-zero, the allocator will place an inaccessible page
 * immediately _before_ the malloc buffer in the address space, instead
 * of _after_ it. Use this to detect malloc buffer under-runs, rather than
 * over-runs. It won't detect both at the same time, so you should test your
 * software twice, once with this value clear, and once with it set.
 * If the value is -1, it will be set from the environment or to zero at
 * run-time.
 */
int EF_PROTECT_BELOW = -1;
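/*
 * Illustrative sketch (hypothetical names): an under-run that is only
 * caught when EF_PROTECT_BELOW is set, because the dead page is then
 * placed before the buffer rather than after it.
 *
 *	char * p = malloc(16);
 *	p[-1] = '\0';	<-- touches the page before the buffer, SIGSEGV
 */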
/*
 * EF_ALLOW_MALLOC_0 is set if Electric Fence is to allow malloc(0). I
 * trap malloc(0) by default because it is a common source of bugs.
 */
int EF_ALLOW_MALLOC_0 = -1;
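/*
 * Illustrative note: with the default setting, a call such as
 *
 *	char * p = malloc(0);
 *
 * aborts with "Allocating 0 bytes, probably a bug." rather than returning
 * a pointer; set EF_ALLOW_MALLOC_0 in the environment to permit it.
 */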
/*
 * allocationList points to the array of slot structures used to manage the
 * malloc arena.
 */
static Slot *	allocationList = 0;
/*
 * allocationListSize is the size of the allocation list. This will always
 * be a multiple of the page size.
 */
static size_t	allocationListSize = 0;
/*
 * slotCount is the number of Slot structures in allocationList.
 */
static size_t	slotCount = 0;
/*
 * unUsedSlots is the number of Slot structures that are currently available
 * to represent new malloc buffers. When this number gets too low, we will
 * create new slots.
 */
static size_t	unUsedSlots = 0;
/*
 * slotsPerPage is the number of slot structures that fit in a virtual
 * memory page.
 */
static size_t	slotsPerPage = 0;
/*
 * internalUse is set when allocating and freeing the allocator-internal
 * data structures.
 */
static int	internalUse = 0;
/*
 * noAllocationListProtection is set to tell malloc() and free() not to
 * manipulate the protection of the allocation list. This is only set in
 * realloc(), which does it to save on slow system calls, and in
 * allocateMoreSlots(), which does it because it changes the allocation list.
 */
static int	noAllocationListProtection = 0;
/*
 * bytesPerPage is set at run-time to the number of bytes per virtual-memory
 * page, as returned by Page_Size().
 */
static size_t	bytesPerPage = 0;
/*
 * internalError is called for those "shouldn't happen" errors in the
 * allocator.
 */
static void
internalError(void)
{
	EF_Abort("Internal error in allocator.");
}
/*
 * initialize sets up the memory allocation arena and the run-time
 * configuration information.
 */
static void
initialize(void)
{
	size_t	size = MEMORY_CREATION_SIZE;
	size_t	slack;
	char *	string;
	Slot *	slot;
	/*
	 * Import the user's environment specification of the default
	 * alignment for malloc(). We want that alignment to be under
	 * user control, since smaller alignment lets us catch more bugs,
	 * however some software will break if malloc() returns a buffer
	 * that is not word-aligned.
	 *
	 * Ideally, I would set the default
	 * alignment to be zero so that we could catch all one-byte
	 * overruns; however, if malloc() is asked to allocate an odd-size
	 * buffer and returns an address that is not word-aligned, or whose
	 * size is not a multiple of the word size, software breaks.
	 * This was the case with the Sun string-handling routines,
	 * which can do word fetches up to three bytes beyond the end of a
	 * string. I handle this problem in part by providing
	 * byte-reference-only versions of the string library functions, but
	 * there are other functions that break, too. Some are in X Windows,
	 * one is in Sam Leffler's TIFF library, and doubtless there are
	 * many others.
	 */
	if ( EF_ALIGNMENT == -1 ) {
		if ( (string = getenv("EF_ALIGNMENT")) != 0 )
			EF_ALIGNMENT = (size_t)atoi(string);
		else
			EF_ALIGNMENT = sizeof(int);
	}
	/*
	 * See if the user wants to protect the address space below a buffer,
	 * rather than that above a buffer.
	 */
	if ( EF_PROTECT_BELOW == -1 ) {
		if ( (string = getenv("EF_PROTECT_BELOW")) != 0 )
			EF_PROTECT_BELOW = (atoi(string) != 0);
		else
			EF_PROTECT_BELOW = 0;
	}
	/*
	 * See if the user wants to protect memory that has been freed until
	 * the program exits, rather than until it is re-allocated.
	 */
	if ( EF_PROTECT_FREE == -1 ) {
		if ( (string = getenv("EF_PROTECT_FREE")) != 0 )
			EF_PROTECT_FREE = (atoi(string) != 0);
		else
			EF_PROTECT_FREE = 0;
	}
	/*
	 * See if the user wants to allow malloc(0).
	 */
	if ( EF_ALLOW_MALLOC_0 == -1 ) {
		if ( (string = getenv("EF_ALLOW_MALLOC_0")) != 0 )
			EF_ALLOW_MALLOC_0 = (atoi(string) != 0);
		else
			EF_ALLOW_MALLOC_0 = 0;
	}
	/*
	 * Get the run-time configuration of the virtual memory page size.
	 */
	bytesPerPage = Page_Size();
	/*
	 * Figure out how many Slot structures to allocate at one time.
	 */
	slotCount = slotsPerPage = bytesPerPage / sizeof(Slot);
	allocationListSize = bytesPerPage;

	if ( allocationListSize > size )
		size = allocationListSize;
	if ( (slack = size % bytesPerPage) != 0 )
		size += bytesPerPage - slack;
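	/*
	 * Worked example (assumed figures): with an 8192-byte page and
	 * size == 1,000,000, slack is 1,000,000 % 8192 == 576, so size is
	 * rounded up by 8192 - 576 == 7,616 to 1,007,616, a whole number
	 * of pages (123 of them).
	 */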
	/*
	 * Allocate memory, and break it up into two malloc buffers. The
	 * first buffer will be used for Slot structures, the second will
	 * be marked free.
	 */
	slot = allocationList = (Slot *)Page_Create(size);
	memset((char *)allocationList, 0, allocationListSize);

	slot[0].internalSize = slot[0].userSize = allocationListSize;
	slot[0].internalAddress = slot[0].userAddress = allocationList;
	slot[0].mode = INTERNAL_USE;
	if ( size > allocationListSize ) {
		slot[1].internalAddress = slot[1].userAddress
		 = ((char *)slot[0].internalAddress) + slot[0].internalSize;
		slot[1].internalSize = slot[1].userSize
		 = size - slot[0].internalSize;
		slot[1].mode = FREE;
	}
	/*
	 * Deny access to the free page, so that we will detect any software
	 * that treads upon free memory.
	 */
	Page_DenyAccess(slot[1].internalAddress, slot[1].internalSize);
	/*
	 * Account for the two slot structures that we've used.
	 */
	unUsedSlots = slotCount - 2;
}
/*
 * allocateMoreSlots is called when there are only enough slot structures
 * left to support the allocation of a single malloc buffer.
 */
static void
allocateMoreSlots(void)
{
	size_t	newSize = allocationListSize + bytesPerPage;
	void *	newAllocation;
	void *	oldAllocation = allocationList;

	Page_AllowAccess(allocationList, allocationListSize);
	noAllocationListProtection = 1;
	newAllocation = malloc(newSize);
	memcpy(newAllocation, allocationList, allocationListSize);
	memset(&(((char *)newAllocation)[allocationListSize]), 0, bytesPerPage);
	allocationList = (Slot *)newAllocation;
	allocationListSize = newSize;
	slotCount += slotsPerPage;
	unUsedSlots += slotsPerPage;
	/*
	 * Keep access to the allocation list open at this point, because
	 * I am returning to memalign(), which needs that access.
	 */
	noAllocationListProtection = 0;
}
/*
 * This is the memory allocator. When asked to allocate a buffer, allocate
 * it in such a way that the end of the buffer is followed by an inaccessible
 * memory page. If software overruns that buffer, it will touch the bad page
 * and get an immediate segmentation fault. It's then easy to zero in on the
 * offending code with a debugger.
 *
 * There are a few complications. If the user asks for an odd-sized buffer,
 * we would have to have that buffer start on an odd address if the byte after
 * the end of the buffer was to be on the inaccessible page. Unfortunately,
 * there is lots of software that asks for odd-sized buffers and then
 * requires that the returned address be word-aligned, or the size of the
 * buffer be a multiple of the word size. Examples are the string-processing
 * functions on Sun systems, which do word references to the string memory
 * and may refer to memory up to three bytes beyond the end of the string.
 * For this reason, I take the alignment requests to memalign() and valloc()
 * seriously.
 *
 * Electric Fence wastes lots of memory. I do a best-fit allocator here
 * so that it won't waste even more. It's slow, but thrashing because your
 * working set is too big for a system's RAM is even slower.
 */
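/*
 * Layout sketch (illustrative): with the default EF_PROTECT_BELOW == 0,
 * an allocation occupies its internal pages like this, with the user
 * address pushed up against the dead page:
 *
 *	| live page(s) ... padding | user buffer | dead (inaccessible) page |
 *
 * With EF_PROTECT_BELOW set, the dead page comes first and the user
 * buffer starts immediately after it:
 *
 *	| dead (inaccessible) page | user buffer | ... padding |
 */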
extern C_LINKAGE void *
memalign(size_t alignment, size_t userSize)
{
	register Slot *	slot;
	register size_t	count;
	Slot *		fullSlot = 0;
	Slot *		emptySlots[2];
	size_t		internalSize;
	size_t		slack;
	char *		address;
	if ( allocationList == 0 )
		initialize();

	if ( userSize == 0 && !EF_ALLOW_MALLOC_0 )
		EF_Abort("Allocating 0 bytes, probably a bug.");
	/*
	 * If EF_PROTECT_BELOW is set, all addresses returned by malloc()
	 * and company will be page-aligned.
	 */
	if ( !EF_PROTECT_BELOW && alignment > 1 ) {
		if ( (slack = userSize % alignment) != 0 )
			userSize += alignment - slack;
	}
	/*
	 * The internal size of the buffer is rounded up to the next page-size
	 * boundary, and then we add another page's worth of memory for the
	 * dead page.
	 */
	internalSize = userSize + bytesPerPage;
	if ( (slack = internalSize % bytesPerPage) != 0 )
		internalSize += bytesPerPage - slack;
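	/*
	 * Worked example (assumed 4096-byte pages): a request for 100 bytes
	 * gives internalSize = 100 + 4096 = 4196, which is then rounded up
	 * to 8192: one live page for the data plus one dead page.
	 */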
	/*
	 * These will hold the addresses of two empty Slot structures that
	 * can be used to hold information for any memory I create, and any
	 * memory that I mark free.
	 */
	emptySlots[0] = 0;
	emptySlots[1] = 0;
	/*
	 * The internal memory used by the allocator is currently
	 * inaccessible, so that errant programs won't scrawl on the
	 * allocator's arena. I'll un-protect it here so that I can make
	 * a new allocation. I'll re-protect it before I return.
	 */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);
	/*
	 * If I'm running out of empty slots, create some more before
	 * there are not enough left to make an allocation.
	 */
	if ( !internalUse && unUsedSlots < 7 ) {
		allocateMoreSlots();
	}
	/*
	 * Iterate through all of the slot structures. Attempt to find a slot
	 * containing free memory of the exact right size. Accept a slot with
	 * more memory than we want, if the exact right size is not available.
	 * Find two slot structures that are not in use. We will need one if
	 * we split a buffer into free and allocated parts, and the second if
	 * we have to create new memory and mark it as free.
	 */
	for ( slot = allocationList, count = slotCount; count > 0; count-- ) {
		if ( slot->mode == FREE
		 && slot->internalSize >= internalSize ) {
			if ( !fullSlot
			 || slot->internalSize < fullSlot->internalSize ) {
				fullSlot = slot;
				if ( slot->internalSize == internalSize
				 && emptySlots[0] )
					break;	/* All done. */
			}
		}
		else if ( slot->mode == NOT_IN_USE ) {
			if ( !emptySlots[0] )
				emptySlots[0] = slot;
			else if ( !emptySlots[1] )
				emptySlots[1] = slot;
			else if ( fullSlot
			 && fullSlot->internalSize == internalSize )
				break;	/* All done. */
		}
		slot++;
	}
	if ( !emptySlots[0] )
		internalError();
	if ( !fullSlot ) {
		/*
		 * I get here if I haven't been able to find a free buffer
		 * with all of the memory I need. I'll have to create more
		 * memory. I'll mark it all as free, and then split it into
		 * free and allocated portions later.
		 */
		size_t	chunkSize = MEMORY_CREATION_SIZE;

		if ( !emptySlots[1] )
			internalError();

		if ( chunkSize < internalSize )
			chunkSize = internalSize;

		if ( (slack = chunkSize % bytesPerPage) != 0 )
			chunkSize += bytesPerPage - slack;
		/* Use up one of the empty slots to make the full slot. */
		fullSlot = emptySlots[0];
		emptySlots[0] = emptySlots[1];
		emptySlots[1] = 0;
		fullSlot->internalAddress = Page_Create(chunkSize);
		fullSlot->internalSize = chunkSize;
		fullSlot->mode = FREE;
		unUsedSlots--;
	}
	/*
	 * If I'm allocating memory for the allocator's own data structures,
	 * mark it INTERNAL_USE so that no errant software will be able to
	 * free it.
	 */
	if ( internalUse )
		fullSlot->mode = INTERNAL_USE;
	else
		fullSlot->mode = ALLOCATED;
	/*
	 * If the buffer I've found is larger than I need, split it into
	 * an allocated buffer with the exact amount of memory I need, and
	 * a free buffer containing the surplus memory.
	 */
	if ( fullSlot->internalSize > internalSize ) {
		emptySlots[0]->internalSize
		 = fullSlot->internalSize - internalSize;
		emptySlots[0]->internalAddress
		 = ((char *)fullSlot->internalAddress) + internalSize;
		emptySlots[0]->mode = FREE;
		fullSlot->internalSize = internalSize;
		unUsedSlots--;
	}
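	/*
	 * Example of the split above (assumed 4096-byte pages): a 12288-byte
	 * free buffer asked to satisfy an 8192-byte internal size keeps 8192
	 * bytes in fullSlot and leaves a 4096-byte FREE buffer in
	 * emptySlots[0] starting right after it.
	 */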
	if ( !EF_PROTECT_BELOW ) {
		/*
		 * Arrange the buffer so that it is followed by an inaccessible
		 * memory page. A buffer overrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "live" page. */
		if ( internalSize - bytesPerPage > 0 )
			Page_AllowAccess(
			 fullSlot->internalAddress
			,internalSize - bytesPerPage);

		address += internalSize - bytesPerPage;

		/* Set up the "dead" page. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		/* Figure out what address to give the user. */
		address -= userSize;
	}
	else {	/* EF_PROTECT_BELOW != 0 */
		/*
		 * Arrange the buffer so that it is preceded by an inaccessible
		 * memory page. A buffer underrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "dead" page. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		address += bytesPerPage;

		/* Set up the "live" page. */
		if ( internalSize - bytesPerPage > 0 )
			Page_AllowAccess(address, internalSize - bytesPerPage);
	}
	fullSlot->userAddress = address;
	fullSlot->userSize = userSize;

	/*
	 * Make the pool's internal memory inaccessible, so that the program
	 * being debugged can't stomp on it.
	 */
	Page_DenyAccess(allocationList, allocationListSize);

	return address;
}
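/*
 * Worked example of the address arithmetic above (assumed 4096-byte pages,
 * default alignment of 4, EF_PROTECT_BELOW clear): malloc(100) gives
 * internalSize == 8192. The dead page is the second page, and the user
 * address is placed at internalAddress + 4096 - 100, so the 100-byte
 * buffer ends exactly at the dead page and buffer[100] faults.
 */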
/*
 * Find the slot structure for a user address.
 */
static Slot *
slotForUserAddress(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( slot->userAddress == address )
			return slot;
		slot++;
	}

	return 0;
}
/*
 * Find the slot structure for an internal address.
 */
static Slot *
slotForInternalAddress(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( slot->internalAddress == address )
			return slot;
		slot++;
	}

	return 0;
}
/*
 * Given the internal address of a buffer, find the buffer immediately
 * before that buffer in the address space. This is used by free() to
 * coalesce two free buffers into one.
 */
static Slot *
slotForInternalAddressPreviousTo(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( ((char *)slot->internalAddress)
		 + slot->internalSize == address )
			return slot;
		slot++;
	}

	return 0;
}
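/*
 * Illustrative example: if one slot's internal region starts at address
 * 0x10000 with an internalSize of 0x2000, then passing the address 0x12000
 * to slotForInternalAddressPreviousTo() returns that slot, because its
 * region ends exactly where the queried address begins. free() uses this
 * to merge the two adjacent regions.
 */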
extern C_LINKAGE void
free(void * address)
{
	Slot *	slot;
	Slot *	previousSlot = 0;
	Slot *	nextSlot = 0;

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);
	slot = slotForUserAddress(address);

	if ( !slot )
		EF_Abort("free(%a): address not from malloc().", address);
	if ( slot->mode != ALLOCATED ) {
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */ ;
		else {
			EF_Abort(
			 "free(%a): freeing free memory."
			,address);
		}
	}
	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;
	else
		slot->mode = FREE;
	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);
	if ( previousSlot
	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
		/* Coalesce previous slot with this one. */
		previousSlot->internalSize += slot->internalSize;
		if ( EF_PROTECT_FREE )
			previousSlot->mode = PROTECTED;

		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		slot = previousSlot;
		unUsedSlots++;
	}
	if ( nextSlot
	 && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}
	slot->userAddress = slot->internalAddress;
	slot->userSize = slot->internalSize;
	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 *
	 * Some operating systems allow munmap() with single-page resolution,
	 * and allow you to un-map portions of a region, rather than the
	 * entire region that was mapped with mmap(). On those operating
	 * systems, we can release protected free pages with Page_Delete(),
	 * in the hope that the swap space attached to those pages will be
	 * released as well.
	 */
	if ( EF_PROTECT_FREE )
		Page_Delete(slot->internalAddress, slot->internalSize);
	else
		Page_DenyAccess(slot->internalAddress, slot->internalSize);
	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);
}
extern C_LINKAGE void *
realloc(void * oldBuffer, size_t newSize)
{
	void *	newBuffer = malloc(newSize);
	Slot *	slot;
	size_t	size;

	Page_AllowAccess(allocationList, allocationListSize);
	noAllocationListProtection = 1;
	slot = slotForUserAddress(oldBuffer);

	if ( slot == 0 )
		EF_Abort(
		 "realloc(%a, %d): address not from malloc()."
		,oldBuffer
		,newSize);
	if ( newSize < (size = slot->userSize) )
		size = newSize;

	memcpy(newBuffer, oldBuffer, size);

	free(oldBuffer);
	noAllocationListProtection = 0;
	Page_DenyAccess(allocationList, allocationListSize);

	if ( size < newSize )
		memset(&(((char *)newBuffer)[size]), 0, newSize - size);

	/* Internal memory was re-protected in free() */
	return newBuffer;
}
extern C_LINKAGE void *
malloc(size_t size)
{
	if ( allocationList == 0 )
		initialize();	/* This sets EF_ALIGNMENT */

	return memalign(EF_ALIGNMENT, size);
}
extern C_LINKAGE void *
calloc(size_t nelem, size_t elsize)
{
	size_t	size = nelem * elsize;
	void *	allocation = malloc(size);

	memset(allocation, 0, size);
	return allocation;
}
/*
 * This will catch more bugs if you remove the page alignment, but it
 * will break some software.
 */
extern C_LINKAGE void *
valloc(size_t size)
{
	return memalign(bytesPerPage, size);
}
/*
 * HP-UX 8/9.01 strcat() reads a word past the source when doing unaligned
 * copies! Work around it here. The bug report has been filed with HP.
 */
char *strcat(char *d, const char *s)
{
	strcpy(d + strlen(d), s);
	return d;
}