usr/src/lib/libbsdmalloc/common/malloc.bsd43.c
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * wizard:/space/4.3reno/usr/src/lib/libc/stdlib/malloc.c
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that: (1) source distributions retain this entire copyright
 * notice and comment, and (2) distributions including binaries display
 * the following acknowledgement: ``This product includes software
 * developed by the University of California, Berkeley and its contributors''
 * in the documentation or other materials provided with the distribution
 * and in all advertising materials mentioning features or use of this
 * software. Neither the name of the University nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 bytes long (ILP32)
 * or 2^n-8 bytes long (LP64).
 */
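
/*
 * Illustrative note, not part of the original source: with the ILP32
 * parameters defined below (EXP = 3, 4 bytes of overhead), a request such
 * as malloc(100) needs 104 bytes including the header, which is passed up
 * to the 128-byte size class, leaving 124 (2^7 - 4) usable bytes.  On LP64
 * the header is 8 bytes, so the same request also lands in the 128-byte
 * class with 120 (2^7 - 8) usable bytes.
 */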

/*LINTLIBRARY*/
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 * Overhead is 4 bytes for ILP32, 8 bytes for LP64.
 */
union overhead {
	union overhead *ov_next;	/* when free */
	struct {
#if defined(_LITTLE_ENDIAN)
		uchar_t	ovu_magic;	/* magic number */
		uchar_t	ovu_index;	/* bucket # */
		uchar_t	ovu_pad[sizeof (union overhead *) - 2];
#elif defined(_BIG_ENDIAN)
		uchar_t	ovu_pad[sizeof (union overhead *) - 2];
		uchar_t	ovu_index;	/* bucket # */
		uchar_t	ovu_magic;	/* magic number */
#else
#error "Endianness is not defined"
#endif
	} ovu;
};

#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index

#define	MAGIC		0xef		/* magic # on accounting info */
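
/*
 * Illustrative note, not part of the original source: on both byte orders
 * the struct above is laid out so that ovu_magic overlays the least
 * significant byte of ov_next.  A free block's ov_next has its bottom two
 * bits clear (see the comment above), while MAGIC (0xef) has both low bits
 * set, so a header byte of 0xef can never be confused with part of a valid
 * free-list pointer; free() and realloc() rely on this to distinguish a
 * live block from a freed one.
 */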

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+EXP).
 * The smallest allocatable block is 8 bytes (ILP32) or 16 bytes (LP64).
 * The overhead information precedes the data area returned to the user.
 */
#ifdef _LP64
#define	EXP		4
#define	NBUCKETS	60
#else
#define	EXP		3
#define	NBUCKETS	29
#endif
static	union overhead *nextf[NBUCKETS];
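
/*
 * Illustrative note, not part of the original source: with EXP = 3 (ILP32),
 * nextf[0] chains 8-byte blocks (4 usable bytes), nextf[1] 16-byte blocks
 * (12 usable), nextf[4] 128-byte blocks (124 usable), and so on up to
 * nextf[28]; with EXP = 4 (LP64) the series starts at 16 bytes and runs to
 * nextf[59].  For buckets of a page or more, morecore() below allocates an
 * extra page, so those blocks can hold 2^(i+EXP) plus most of a page.
 */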

static	int pagesz;			/* page size */
static	long sbrk_adjust;		/* in case sbrk() does alignment */
static	int pagebucket;			/* page size bucket */
static	void morecore(int);
static	int findbucket(union overhead *, int);

void *
malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	ssize_t	n;
	size_t amt;

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = getpagesize();
		op = sbrk(0);
		n = pagesz - sizeof (*op) - ((uintptr_t)op & (pagesz - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk(n) == (void *)-1)
				return (NULL);
			/*
			 * We were careful to arrange that
			 * sbrk(0) + sizeof (union overhead)
			 * should end up on a page boundary.
			 * If the underlying sbrk() performs alignment
			 * then this is false.  We compute the adjustment.
			 */
			op = sbrk(0);
			sbrk_adjust = (uintptr_t)(op + 1) & (pagesz - 1);
		} else {
			sbrk_adjust = 0;
		}
		bucket = 0;
		amt = (1UL << EXP);
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op))) {
		amt = (1UL << EXP);	/* size of first bucket */
		bucket = 0;
		n = -(ssize_t)(sizeof (*op));
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = (uchar_t)bucket;
	return (op + 1);
}
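
#if 0
/*
 * Illustrative sketch, not part of the original source: how the sizing loop
 * above classifies a few requests, assuming ILP32 (4-byte header, EXP = 3)
 * and a 4096-byte page.  The helper name is hypothetical and the block is
 * not compiled.
 */
static void
example_bucket_walk(void)
{
	void *p1, *p2, *p3;

	p1 = malloc(100);	/* 100 + 4 <= 128: bucket 4, 128-byte block */
	p2 = malloc(124);	/* exactly fills a 128-byte block: bucket 4 */
	p3 = malloc(125);	/* needs 129 bytes: passed up to bucket 5 (256) */

	free(p3);
	free(p2);
	free(p1);
}
#endif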

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	size_t sz;		/* size of desired block */
	ssize_t amt;		/* amount to allocate */
	long nblks;		/* how many blocks we get */

	sz = 1UL << (bucket + EXP);
	if (sz == 0)
		return;
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt <= 0)
		return;
	if (amt > LONG_MAX) {
		intptr_t delta;
		/*
		 * the value required is too big for sbrk() to deal with
		 * in one go, so use sbrk() at most 2 times instead.
		 */
		op = sbrk(0);
		delta = LONG_MAX;
		while (delta > 0) {
			if (sbrk(delta) == (void *)-1) {
				if (op != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return;
			}
			amt -= LONG_MAX;
			delta = amt;
		}
	}
	else
		op = sbrk(amt);
	/* no more room! */
	if (op == (union overhead *)-1)
		return;
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)op - sbrk_adjust);
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		/* LINTED improper alignment */
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		/* LINTED improper alignment */
		op = (union overhead *)((caddr_t)op + sz);
	}
}
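
/*
 * Illustrative note, not part of the original source: for a small bucket,
 * morecore() grabs one page and carves it into equal pieces.  With a
 * 4096-byte page and bucket 4 on ILP32 (128-byte blocks), the loop above
 * links 32 blocks onto nextf[4]; the next 31 malloc() calls for that size
 * class are satisfied without touching sbrk() again.
 */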

void
free(void *cp)
{
	int size;
	union overhead *op;

	if (cp == NULL)
		return;
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC)
		return;				/* previously freed? */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}
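
/*
 * Illustrative note, not part of the original source: free() is a plain
 * LIFO push onto the per-size free list.  Writing ov_next overwrites the
 * byte that held ov_magic, so a second free() of the same block usually
 * fails the MAGIC check above and is silently ignored.  Blocks are never
 * coalesced or returned to the system.
 */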

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

void *
realloc(void *cp, size_t nbytes)
{
	size_t onb;
	int i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (malloc(nbytes));
	/* LINTED improper alignment */
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``realloc_srchlen'' items free'd.
		 * If all lookups fail, then just malloc() the
		 * space and copy the size of the new space.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, realloc_srchlen)) < 0) {
			if ((res = malloc(nbytes)) != NULL)
				(void) memmove(res, cp, nbytes);
			return (res);
		}
	}
	onb = 1UL << (i + EXP);
	if (onb < pagesz)
		onb -= sizeof (*op);
	else
		onb += pagesz - sizeof (*op);
	/* avoid the copy if same size block */
	if (was_alloced) {
		size_t sz = 0;
		if (i) {
			sz = 1UL << (i + EXP - 1);
			if (sz < pagesz)
				sz -= sizeof (*op);
			else
				sz += pagesz - sizeof (*op);
		}
		if (nbytes <= onb && nbytes > sz) {
			return (cp);
		} else
			free(cp);
	}
	if ((res = malloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)	/* common optimization if "compacting" */
		(void) memmove(res, cp, (nbytes < onb) ? nbytes : onb);
	return (res);
}
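
#if 0
/*
 * Illustrative sketch, not part of the original source: the realloc() paths
 * described above, assuming ILP32 (4-byte header).  The helper name is
 * hypothetical and the block is not compiled.
 */
static void
example_realloc_paths(void)
{
	void *p;

	p = malloc(60);		/* 64-byte block, bucket 3 */
	p = realloc(p, 120);	/* too big for 64 bytes: moved to a 128-byte block */
	p = realloc(p, 100);	/* still fits the 128-byte block: same pointer back */

	free(p);
	p = realloc(p, 80);	/* "storage compaction": findbucket() finds the
				 * just-freed block at the head of nextf[4] */
	free(p);
}
#endif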

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
	union overhead *p;
	int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}
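
/*
 * Illustrative note, not part of the original source: with srchlen == -1
 * the test ``j != srchlen'' never becomes false, so the inner loop walks an
 * entire free list; with the default realloc_srchlen of 4 it inspects at
 * most four blocks per bucket.
 */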