Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / lib / libumem / common / vmem_sbrk.c
blob20bfb7345470801bcaae987b84f431202d724bb9
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
/*
 * The structure of the sbrk backend:
 *
 * +-----------+
 * | sbrk_top  |
 * +-----------+
 *      | (vmem_sbrk_alloc(), vmem_free())
 *      |
 * +-----------+
 * | sbrk_heap |
 * +-----------+
 *   |  |  ...  | (vmem_alloc(), vmem_free())
 * <other arenas>
 *
 * The sbrk_top arena holds all controlled memory.  vmem_sbrk_alloc() handles
 * allocations from it, including growing the heap when we run low.
 *
 * Growing the heap is complicated by the fact that we have to extend the
 * sbrk_top arena (using _vmem_extend_alloc()), and that can fail.  Since
 * other threads may be actively allocating, we can't return the memory.
 *
 * Instead, we put it on a doubly-linked list, sbrk_fails, which we search
 * before calling sbrk().
 */
54 #include <errno.h>
55 #include <limits.h>
56 #include <sys/sysmacros.h>
57 #include <sys/mman.h>
58 #include <unistd.h>
60 #include "vmem_base.h"
62 #include "misc.h"
64 size_t vmem_sbrk_pagesize = 0; /* the preferred page size of the heap */
66 #define VMEM_SBRK_MINALLOC (64 * 1024)
67 size_t vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC; /* minimum allocation */
69 static size_t real_pagesize;
70 static vmem_t *sbrk_heap;
72 typedef struct sbrk_fail {
73 struct sbrk_fail *sf_next;
74 struct sbrk_fail *sf_prev;
75 void *sf_base; /* == the sbrk_fail's address */
76 size_t sf_size; /* the size of this buffer */
77 } sbrk_fail_t;
79 static sbrk_fail_t sbrk_fails = {
80 &sbrk_fails,
81 &sbrk_fails,
82 NULL,
86 static mutex_t sbrk_faillock = DEFAULTMUTEX;
89 * Try to extend src with [pos, pos + size).
91 * If it fails, add the block to the sbrk_fails list.
93 static void *
94 vmem_sbrk_extend_alloc(vmem_t *src, void *pos, size_t size, size_t alloc,
95 int vmflags)
97 sbrk_fail_t *fnext, *fprev, *fp;
98 void *ret;
100 ret = _vmem_extend_alloc(src, pos, size, alloc, vmflags);
101 if (ret != NULL)
102 return (ret);
104 fp = (sbrk_fail_t *)pos;
106 ASSERT(sizeof (sbrk_fail_t) <= size);
108 fp->sf_base = pos;
109 fp->sf_size = size;
111 (void) mutex_lock(&sbrk_faillock);
112 fp->sf_next = fnext = &sbrk_fails;
113 fp->sf_prev = fprev = sbrk_fails.sf_prev;
114 fnext->sf_prev = fp;
115 fprev->sf_next = fp;
116 (void) mutex_unlock(&sbrk_faillock);
118 return (NULL);
122 * Try to add at least size bytes to src, using the sbrk_fails list
124 static void *
125 vmem_sbrk_tryfail(vmem_t *src, size_t size, int vmflags)
127 sbrk_fail_t *fp;
129 (void) mutex_lock(&sbrk_faillock);
130 for (fp = sbrk_fails.sf_next; fp != &sbrk_fails; fp = fp->sf_next) {
131 if (fp->sf_size >= size) {
132 fp->sf_next->sf_prev = fp->sf_prev;
133 fp->sf_prev->sf_next = fp->sf_next;
134 fp->sf_next = fp->sf_prev = NULL;
135 break;
138 (void) mutex_unlock(&sbrk_faillock);
140 if (fp != &sbrk_fails) {
141 ASSERT(fp->sf_base == (void *)fp);
142 return (vmem_sbrk_extend_alloc(src, fp, fp->sf_size, size,
143 vmflags));
146 * nothing of the right size on the freelist
148 return (NULL);
151 static void *
152 vmem_sbrk_alloc(vmem_t *src, size_t size, int vmflags)
154 extern void *_sbrk_grow_aligned(size_t min_size, size_t low_align,
155 size_t high_align, size_t *actual_size);
157 void *ret;
158 void *buf;
159 size_t buf_size;
161 int old_errno = errno;
163 ret = vmem_alloc(src, size, VM_NOSLEEP);
164 if (ret != NULL) {
165 errno = old_errno;
166 return (ret);
170 * The allocation failed. We need to grow the heap.
172 * First, try to use any buffers which failed earlier.
174 if (sbrk_fails.sf_next != &sbrk_fails &&
175 (ret = vmem_sbrk_tryfail(src, size, vmflags)) != NULL)
176 return (ret);
178 buf_size = MAX(size, vmem_sbrk_minalloc);
181 * buf_size gets overwritten with the actual allocated size
183 buf = _sbrk_grow_aligned(buf_size, real_pagesize, vmem_sbrk_pagesize,
184 &buf_size);
186 if (buf != MAP_FAILED) {
187 ret = vmem_sbrk_extend_alloc(src, buf, buf_size, size, vmflags);
188 if (ret != NULL) {
189 errno = old_errno;
190 return (ret);
195 * Growing the heap failed. The vmem_alloc() above called umem_reap().
197 ASSERT((vmflags & VM_NOSLEEP) == VM_NOSLEEP);
199 errno = old_errno;
200 return (NULL);
204 * fork1() support
206 void
207 vmem_sbrk_lockup(void)
209 (void) mutex_lock(&sbrk_faillock);
212 void
213 vmem_sbrk_release(void)
215 (void) mutex_unlock(&sbrk_faillock);
218 vmem_t *
219 vmem_sbrk_arena(vmem_alloc_t **a_out, vmem_free_t **f_out)
221 if (sbrk_heap == NULL) {
222 size_t heap_size;
224 real_pagesize = sysconf(_SC_PAGESIZE);
226 heap_size = vmem_sbrk_pagesize;
228 if (issetugid()) {
229 heap_size = 0;
230 } else if (heap_size != 0 && !ISP2(heap_size)) {
231 heap_size = 0;
232 log_message("ignoring bad pagesize: 0x%p\n", heap_size);
234 if (heap_size <= real_pagesize) {
235 heap_size = real_pagesize;
236 } else {
237 struct memcntl_mha mha;
238 mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
239 mha.mha_flags = 0;
240 mha.mha_pagesize = heap_size;
242 if (memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0)
243 == -1) {
244 log_message("unable to set MAPSIZE_BSSBRK to "
245 "0x%p\n", heap_size);
246 heap_size = real_pagesize;
249 vmem_sbrk_pagesize = heap_size;
251 /* validate vmem_sbrk_minalloc */
252 if (vmem_sbrk_minalloc < VMEM_SBRK_MINALLOC)
253 vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC;
254 vmem_sbrk_minalloc = P2ROUNDUP(vmem_sbrk_minalloc, heap_size);
256 sbrk_heap = vmem_init("sbrk_top", real_pagesize,
257 vmem_sbrk_alloc, vmem_free,
258 "sbrk_heap", NULL, 0, real_pagesize,
259 vmem_alloc, vmem_free);
262 if (a_out != NULL)
263 *a_out = vmem_alloc;
264 if (f_out != NULL)
265 *f_out = vmem_free;
267 return (sbrk_heap);