starfix: HoR fix!!! wave + team check fix, 2 bosses working... still missing the LK + PR: the loot...
[st4rcore.git] / dep / jemalloc / src / chunk_mmap.c
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
#ifndef NO_TLS
static __thread bool mmap_unaligned_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
#define MMAP_UNALIGNED_SET(v) do { \
    mmap_unaligned_tls = (v); \
} while (0)
#else
static pthread_key_t mmap_unaligned_tsd;
#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
#define MMAP_UNALIGNED_SET(v) do { \
    pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
} while (0)
#endif
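
/*
 * MMAP_UNALIGNED_GET()/MMAP_UNALIGNED_SET() give both builds the same
 * interface.  With TLS the flag is a plain thread-local bool; in the
 * pthread_setspecific() fallback the bool is smuggled through a void *
 * value, so MMAP_UNALIGNED_SET(true) stores (void *)1 and the GET side
 * converts it back to bool.  The flag is set whenever mmap() hands back a
 * chunk-misaligned address on the fast path, and cleared again when the
 * slow path observes an aligned mapping (see chunk_alloc_mmap_slow()).
 */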

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *pages_map(void *addr, size_t size, bool noreserve);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
    bool noreserve);
static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size, bool noreserve)
{
    void *ret;

    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
    if (noreserve)
        flags |= MAP_NORESERVE;
#endif
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
        ret = NULL;
    else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write("<jemalloc>: Error in munmap(): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }

    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
}
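
/*
 * In other words, pages_map() has exactly three outcomes, matching the final
 * assert above: NULL when mmap() fails; NULL when a placement hint
 * (addr != NULL) was given but not honored, in which case the stray mapping
 * is unmapped first; or the start of a fresh PROT_READ|PROT_WRITE anonymous
 * mapping of size bytes (equal to addr whenever a hint was honored).  For
 * example, the hypothetical call pages_map(NULL, chunksize, false) yields
 * either NULL or a new chunksize-byte mapping at a kernel-chosen address.
 */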

static void
pages_unmap(void *addr, size_t size)
{

    if (munmap(addr, size) == -1) {
        char buf[BUFERROR_BUF];

        buferror(errno, buf, sizeof(buf));
        malloc_write("<jemalloc>: Error in munmap(): ");
        malloc_write(buf);
        malloc_write("\n");
        if (opt_abort)
            abort();
    }
}

static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
    void *ret;
    size_t offset;

    /* Beware size_t wrap-around. */
    if (size + chunksize <= size)
        return (NULL);

    ret = pages_map(NULL, size + chunksize, noreserve);
    if (ret == NULL)
        return (NULL);

    /* Clean up unneeded leading/trailing space. */
    offset = CHUNK_ADDR2OFFSET(ret);
    if (offset != 0) {
        /* Note that mmap() returned an unaligned mapping. */
        unaligned = true;

        /* Leading space. */
        pages_unmap(ret, chunksize - offset);

        ret = (void *)((uintptr_t)ret + (chunksize - offset));

        /* Trailing space. */
        pages_unmap((void *)((uintptr_t)ret + size), offset);
    } else {
        /* Trailing space only. */
        pages_unmap((void *)((uintptr_t)ret + size), chunksize);
    }

    /*
     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
     * the next chunk_alloc_mmap() execution tries the fast allocation
     * method.
     */
    if (unaligned == false)
        MMAP_UNALIGNED_SET(false);

    return (ret);
}
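
/*
 * Worked example of the trimming above, with hypothetical numbers: assume
 * chunksize = 4 MiB (0x400000) and a one-chunk request, size = 0x400000.
 * pages_map() maps size + chunksize = 0x800000 bytes and might return
 * ret = 0x7f3a00300000, so offset = CHUNK_ADDR2OFFSET(ret) = 0x300000.  The
 * leading chunksize - offset = 0x100000 bytes are unmapped, moving ret to
 * the aligned address 0x7f3a00400000, and the trailing offset = 0x300000
 * bytes starting at ret + size = 0x7f3a00800000 are unmapped as well,
 * leaving exactly size bytes on a chunk boundary.  The wrap-around check
 * rejects requests where size + chunksize would overflow size_t.
 */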

static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
    void *ret;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in at least one call to
     * pages_unmap().
     *
     * A more optimistic approach is to try mapping precisely the right
     * amount, then try to append another mapping if alignment is off.  In
     * practice, this works out well as long as the application is not
     * interleaving mappings via direct mmap() calls.  If we do run into a
     * situation where there is an interleaved mapping and we are unable to
     * extend an unaligned mapping, our best option is to switch to the
     * slow method until mmap() returns another aligned mapping.  This will
     * tend to leave a gap in the memory map that is too small to cause
     * later problems for the optimistic method.
     *
     * Another possible confounding factor is address space layout
     * randomization (ASLR), which causes mmap(2) to disregard the
     * requested address.  mmap_unaligned tracks whether the previous
     * chunk_alloc_mmap() execution received any unaligned or relocated
     * mappings, and if so, the current execution will immediately fall
     * back to the slow method.  However, we keep track of whether the fast
     * method would have succeeded, and if so, we make a note to try the
     * fast method next time.
     */

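    /*
     * Concrete illustration of the optimistic path (hypothetical addresses,
     * assuming chunksize = 0x400000 and a one-chunk request, size =
     * 0x400000): pages_map(NULL, size, noreserve) returns
     * ret = 0x7f3a00300000, so offset = 0x300000 and the mapping is
     * misaligned.  The code below then tries to map the missing
     * chunksize - offset = 0x100000 bytes directly at ret + size; if that
     * succeeds, the unaligned 0x100000-byte head is unmapped and the chunk
     * starts at the aligned address 0x7f3a00400000.  If the neighboring
     * range is already taken, everything is unmapped and the slow method
     * takes over.
     */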
    if (MMAP_UNALIGNED_GET() == false) {
        size_t offset;

        ret = pages_map(NULL, size, noreserve);
        if (ret == NULL)
            return (NULL);

        offset = CHUNK_ADDR2OFFSET(ret);
        if (offset != 0) {
            MMAP_UNALIGNED_SET(true);
            /* Try to extend chunk boundary. */
            if (pages_map((void *)((uintptr_t)ret + size),
                chunksize - offset, noreserve) == NULL) {
                /*
                 * Extension failed.  Clean up, then revert to
                 * the reliable-but-expensive method.
                 */
                pages_unmap(ret, size);
                ret = chunk_alloc_mmap_slow(size, true, noreserve);
            } else {
                /* Clean up unneeded leading space. */
                pages_unmap(ret, chunksize - offset);
                ret = (void *)((uintptr_t)ret + (chunksize - offset));
            }
        }
    } else
        ret = chunk_alloc_mmap_slow(size, false, noreserve);

    return (ret);
}

void *
chunk_alloc_mmap(size_t size)
{
    return chunk_alloc_mmap_internal(size, false);
}

void *
chunk_alloc_mmap_noreserve(size_t size)
{
    return chunk_alloc_mmap_internal(size, true);
}

void
chunk_dealloc_mmap(void *chunk, size_t size)
{
    pages_unmap(chunk, size);
}

bool
chunk_mmap_boot(void)
{
#ifdef NO_TLS
    if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
        malloc_write("<jemalloc>: Error in pthread_key_create()\n");
        return (true);
    }
#endif

    return (false);
}
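
/*
 * Usage sketch (illustration only, not part of the upstream file): the
 * expected call sequence for this module is boot once, then allocate and
 * release chunk-aligned regions.  In jemalloc these entry points are
 * typically driven by the chunk management code rather than called
 * directly; the hypothetical function below is compiled out.
 */
#if 0
static void
chunk_mmap_example(void)
{
    void *chunk;

    /* One-time setup; only does real work when NO_TLS is defined. */
    if (chunk_mmap_boot())
        return;

    /* Allocate one chunk-aligned chunk and release it again. */
    chunk = chunk_alloc_mmap(chunksize);
    if (chunk != NULL) {
        assert(CHUNK_ADDR2OFFSET(chunk) == 0);
        chunk_dealloc_mmap(chunk, chunksize);
    }
}
#endif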