starfix: Halls of Reflection (HoR) fix — wave and team checks corrected; two bosses now working; the LK (Lich King) escape phase is still missing; PR: loot tables still pending.
[st4rcore.git] / dep / jemalloc / src / chunk_dss.c
blob 5c0e290e4411565044d39435437647b2f8fe3124
1 #define JEMALLOC_CHUNK_DSS_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 #ifdef JEMALLOC_DSS
4 /******************************************************************************/
5 /* Data. */
7 malloc_mutex_t dss_mtx;
9 /* Base address of the DSS. */
10 static void *dss_base;
11 /* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
12 static void *dss_prev;
13 /* Current upper limit on DSS addresses. */
14 static void *dss_max;
17 * Trees of chunks that were previously allocated (trees differ only in node
18 * ordering). These are used when allocating chunks, in an attempt to re-use
19 * address space. Depending on function, different tree orderings are needed,
20 * which is why there are two trees with the same contents.
22 static extent_tree_t dss_chunks_szad;
23 static extent_tree_t dss_chunks_ad;
25 /******************************************************************************/
26 /* Function prototypes for non-inline static functions. */
28 static void *chunk_recycle_dss(size_t size, bool *zero);
29 static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
31 /******************************************************************************/
33 static void *
34 chunk_recycle_dss(size_t size, bool *zero)
36 extent_node_t *node, key;
38 key.addr = NULL;
39 key.size = size;
40 malloc_mutex_lock(&dss_mtx);
41 node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
42 if (node != NULL) {
43 void *ret = node->addr;
45 /* Remove node from the tree. */
46 extent_tree_szad_remove(&dss_chunks_szad, node);
47 if (node->size == size) {
48 extent_tree_ad_remove(&dss_chunks_ad, node);
49 base_node_dealloc(node);
50 } else {
52 * Insert the remainder of node's address range as a
53 * smaller chunk. Its position within dss_chunks_ad
54 * does not change.
56 assert(node->size > size);
57 node->addr = (void *)((uintptr_t)node->addr + size);
58 node->size -= size;
59 extent_tree_szad_insert(&dss_chunks_szad, node);
61 malloc_mutex_unlock(&dss_mtx);
63 if (*zero)
64 memset(ret, 0, size);
65 return (ret);
67 malloc_mutex_unlock(&dss_mtx);
69 return (NULL);
72 void *
73 chunk_alloc_dss(size_t size, bool *zero)
75 void *ret;
77 ret = chunk_recycle_dss(size, zero);
78 if (ret != NULL)
79 return (ret);
82 * sbrk() uses a signed increment argument, so take care not to
83 * interpret a huge allocation request as a negative increment.
85 if ((intptr_t)size < 0)
86 return (NULL);
88 malloc_mutex_lock(&dss_mtx);
89 if (dss_prev != (void *)-1) {
90 intptr_t incr;
93 * The loop is necessary to recover from races with other
94 * threads that are using the DSS for something other than
95 * malloc.
97 do {
98 /* Get the current end of the DSS. */
99 dss_max = sbrk(0);
102 * Calculate how much padding is necessary to
103 * chunk-align the end of the DSS.
105 incr = (intptr_t)size
106 - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
107 if (incr == (intptr_t)size)
108 ret = dss_max;
109 else {
110 ret = (void *)((intptr_t)dss_max + incr);
111 incr += size;
114 dss_prev = sbrk(incr);
115 if (dss_prev == dss_max) {
116 /* Success. */
117 dss_max = (void *)((intptr_t)dss_prev + incr);
118 malloc_mutex_unlock(&dss_mtx);
119 *zero = true;
120 return (ret);
122 } while (dss_prev != (void *)-1);
124 malloc_mutex_unlock(&dss_mtx);
126 return (NULL);
129 static extent_node_t *
130 chunk_dealloc_dss_record(void *chunk, size_t size)
132 extent_node_t *xnode, *node, *prev, key;
134 xnode = NULL;
135 while (true) {
136 key.addr = (void *)((uintptr_t)chunk + size);
137 node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
138 /* Try to coalesce forward. */
139 if (node != NULL && node->addr == key.addr) {
141 * Coalesce chunk with the following address range.
142 * This does not change the position within
143 * dss_chunks_ad, so only remove/insert from/into
144 * dss_chunks_szad.
146 extent_tree_szad_remove(&dss_chunks_szad, node);
147 node->addr = chunk;
148 node->size += size;
149 extent_tree_szad_insert(&dss_chunks_szad, node);
150 break;
151 } else if (xnode == NULL) {
153 * It is possible that base_node_alloc() will cause a
154 * new base chunk to be allocated, so take care not to
155 * deadlock on dss_mtx, and recover if another thread
156 * deallocates an adjacent chunk while this one is busy
157 * allocating xnode.
159 malloc_mutex_unlock(&dss_mtx);
160 xnode = base_node_alloc();
161 malloc_mutex_lock(&dss_mtx);
162 if (xnode == NULL)
163 return (NULL);
164 } else {
165 /* Coalescing forward failed, so insert a new node. */
166 node = xnode;
167 xnode = NULL;
168 node->addr = chunk;
169 node->size = size;
170 extent_tree_ad_insert(&dss_chunks_ad, node);
171 extent_tree_szad_insert(&dss_chunks_szad, node);
172 break;
175 /* Discard xnode if it ended up unused do to a race. */
176 if (xnode != NULL)
177 base_node_dealloc(xnode);
179 /* Try to coalesce backward. */
180 prev = extent_tree_ad_prev(&dss_chunks_ad, node);
181 if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
182 chunk) {
184 * Coalesce chunk with the previous address range. This does
185 * not change the position within dss_chunks_ad, so only
186 * remove/insert node from/into dss_chunks_szad.
188 extent_tree_szad_remove(&dss_chunks_szad, prev);
189 extent_tree_ad_remove(&dss_chunks_ad, prev);
191 extent_tree_szad_remove(&dss_chunks_szad, node);
192 node->addr = prev->addr;
193 node->size += prev->size;
194 extent_tree_szad_insert(&dss_chunks_szad, node);
196 base_node_dealloc(prev);
199 return (node);
202 bool
203 chunk_in_dss(void *chunk)
205 bool ret;
207 malloc_mutex_lock(&dss_mtx);
208 if ((uintptr_t)chunk >= (uintptr_t)dss_base
209 && (uintptr_t)chunk < (uintptr_t)dss_max)
210 ret = true;
211 else
212 ret = false;
213 malloc_mutex_unlock(&dss_mtx);
215 return (ret);
218 bool
219 chunk_dealloc_dss(void *chunk, size_t size)
221 bool ret;
223 malloc_mutex_lock(&dss_mtx);
224 if ((uintptr_t)chunk >= (uintptr_t)dss_base
225 && (uintptr_t)chunk < (uintptr_t)dss_max) {
226 extent_node_t *node;
228 /* Try to coalesce with other unused chunks. */
229 node = chunk_dealloc_dss_record(chunk, size);
230 if (node != NULL) {
231 chunk = node->addr;
232 size = node->size;
235 /* Get the current end of the DSS. */
236 dss_max = sbrk(0);
239 * Try to shrink the DSS if this chunk is at the end of the
240 * DSS. The sbrk() call here is subject to a race condition
241 * with threads that use brk(2) or sbrk(2) directly, but the
242 * alternative would be to leak memory for the sake of poorly
243 * designed multi-threaded programs.
245 if ((void *)((uintptr_t)chunk + size) == dss_max
246 && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
247 /* Success. */
248 dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
250 if (node != NULL) {
251 extent_tree_szad_remove(&dss_chunks_szad, node);
252 extent_tree_ad_remove(&dss_chunks_ad, node);
253 base_node_dealloc(node);
255 } else
256 madvise(chunk, size, MADV_DONTNEED);
258 ret = false;
259 goto RETURN;
262 ret = true;
263 RETURN:
264 malloc_mutex_unlock(&dss_mtx);
265 return (ret);
268 bool
269 chunk_dss_boot(void)
272 if (malloc_mutex_init(&dss_mtx))
273 return (true);
274 dss_base = sbrk(0);
275 dss_prev = dss_base;
276 dss_max = dss_base;
277 extent_tree_szad_new(&dss_chunks_szad);
278 extent_tree_ad_new(&dss_chunks_ad);
280 return (false);
283 /******************************************************************************/
284 #endif /* JEMALLOC_DSS */