1 /* -----------------------------------------------------------------------
2 closures.c - Copyright (c) 2019 Anthony Green
3 Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
4 Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
5 Copyright (c) 2011 Plausible Labs Cooperative, Inc.
7 Code to allocate and deallocate memory for closures.
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 ``Software''), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice shall be included
18 in all copies or substantial portions of the Software.
20 THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
24 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
25 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 DEALINGS IN THE SOFTWARE.
28 ----------------------------------------------------------------------- */
30 #if defined __linux__ && !defined _GNU_SOURCE
34 #include <fficonfig.h>
36 #include <ffi_common.h>
40 #include <sys/param.h>
43 #if __NetBSD_Version__ - 0 >= 799007200
44 /* NetBSD with PROT_MPROTECT */
49 #ifdef HAVE_SYS_MEMFD_H
50 #include <sys/memfd.h>
/* Size of the prefix stored at the start of each data mapping: it must
   hold the allocation size plus the pointer to the matching code
   mapping, while keeping the user data that follows maximally aligned.
   (The original text lost the ternary's true branch.)  */
static const size_t overhead =
  (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t))
    ? sizeof(max_align_t)
    : sizeof(void *) + sizeof(size_t);

/* Advance pointer P by D bytes; unsigned wrap-around makes negative
   offsets (expressed as large unsigned values) well defined.  */
#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d)))
61 ffi_closure_alloc (size_t size
, void **code
)
63 static size_t page_size
;
65 void *codeseg
, *dataseg
;
68 /* Expect that PAX mprotect is active and a separate code mapping is necessary. */
72 /* Obtain system page size. */
74 page_size
= sysconf(_SC_PAGESIZE
);
76 /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */
77 rounded_size
= (size
+ overhead
+ page_size
- 1) & ~(page_size
- 1);
79 /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */
80 prot
= PROT_READ
| PROT_WRITE
| PROT_MPROTECT(PROT_EXEC
);
81 dataseg
= mmap(NULL
, rounded_size
, prot
, MAP_ANON
| MAP_PRIVATE
, -1, 0);
82 if (dataseg
== MAP_FAILED
)
85 /* Create secondary mapping and switch it to RX. */
86 codeseg
= mremap(dataseg
, rounded_size
, NULL
, rounded_size
, MAP_REMAPDUP
);
87 if (codeseg
== MAP_FAILED
) {
88 munmap(dataseg
, rounded_size
);
91 if (mprotect(codeseg
, rounded_size
, PROT_READ
| PROT_EXEC
) == -1) {
92 munmap(codeseg
, rounded_size
);
93 munmap(dataseg
, rounded_size
);
97 /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */
98 memcpy(dataseg
, &rounded_size
, sizeof(rounded_size
));
99 memcpy(ADD_TO_POINTER(dataseg
, sizeof(size_t)), &codeseg
, sizeof(void *));
100 *code
= ADD_TO_POINTER(codeseg
, overhead
);
101 return ADD_TO_POINTER(dataseg
, overhead
);
105 ffi_closure_free (void *ptr
)
107 void *codeseg
, *dataseg
;
110 dataseg
= ADD_TO_POINTER(ptr
, -overhead
);
111 memcpy(&rounded_size
, dataseg
, sizeof(rounded_size
));
112 memcpy(&codeseg
, ADD_TO_POINTER(dataseg
, sizeof(size_t)), sizeof(void *));
113 munmap(dataseg
, rounded_size
);
114 munmap(codeseg
, rounded_size
);
/* Static trampolines are not used on this platform.  */
int
ffi_tramp_is_present (__attribute__((unused)) void *ptr)
{
  return 0;
}
122 #else /* !NetBSD with PROT_MPROTECT */
124 #if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
125 # if __linux__ && !defined(__ANDROID__)
126 /* This macro indicates it may be forbidden to map anonymous memory
127 with both write and execute permission. Code compiled when this
128 option is defined will attempt to map such pages once, but if it
129 fails, it falls back to creating a temporary file in a writable and
130 executable filesystem and mapping pages from it into separate
131 locations in the virtual memory space, one location writable and
132 another executable. */
133 # define FFI_MMAP_EXEC_WRIT 1
134 # define HAVE_MNTENT 1
136 # if defined(_WIN32) || defined(__OS2__)
137 /* Windows systems may have Data Execution Protection (DEP) enabled,
138 which requires the use of VirtualMalloc/VirtualFree to alloc/free
139 executable memory. */
140 # define FFI_MMAP_EXEC_WRIT 1
144 #if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX
145 # if defined(__linux__) && !defined(__ANDROID__)
146 /* When defined to 1 check for SELinux and if SELinux is active,
147 don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that
148 might cause audit messages. */
149 # define FFI_MMAP_EXEC_SELINUX 1
155 #if FFI_EXEC_TRAMPOLINE_TABLE
159 #include <mach/mach.h>
167 extern void *ffi_closure_trampoline_table_page
;
169 typedef struct ffi_trampoline_table ffi_trampoline_table
;
170 typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry
;
172 struct ffi_trampoline_table
174 /* contiguous writable and executable pages */
175 vm_address_t config_page
;
177 /* free list tracking */
179 ffi_trampoline_table_entry
*free_list
;
180 ffi_trampoline_table_entry
*free_list_pool
;
182 ffi_trampoline_table
*prev
;
183 ffi_trampoline_table
*next
;
186 struct ffi_trampoline_table_entry
188 void *(*trampoline
) (void);
189 ffi_trampoline_table_entry
*next
;
192 /* Total number of trampolines that fit in one trampoline table */
193 #define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE)
195 static pthread_mutex_t ffi_trampoline_lock
= PTHREAD_MUTEX_INITIALIZER
;
196 static ffi_trampoline_table
*ffi_trampoline_tables
= NULL
;
198 static ffi_trampoline_table
*
199 ffi_trampoline_table_alloc (void)
201 ffi_trampoline_table
*table
;
202 vm_address_t config_page
;
203 vm_address_t trampoline_page
;
204 vm_address_t trampoline_page_template
;
210 /* Allocate two pages -- a config page and a placeholder page */
212 kt
= vm_allocate (mach_task_self (), &config_page
, PAGE_MAX_SIZE
* 2,
214 if (kt
!= KERN_SUCCESS
)
217 /* Remap the trampoline table on top of the placeholder page */
218 trampoline_page
= config_page
+ PAGE_MAX_SIZE
;
221 trampoline_page_template
= (vm_address_t
)(uintptr_t)ptrauth_auth_data((void *)&ffi_closure_trampoline_table_page
, ptrauth_key_function_pointer
, 0);
223 trampoline_page_template
= (vm_address_t
)&ffi_closure_trampoline_table_page
;
227 /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
228 trampoline_page_template
&= ~1UL;
230 kt
= vm_remap (mach_task_self (), &trampoline_page
, PAGE_MAX_SIZE
, 0x0,
231 VM_FLAGS_OVERWRITE
, mach_task_self (), trampoline_page_template
,
232 FALSE
, &cur_prot
, &max_prot
, VM_INHERIT_SHARE
);
233 if (kt
!= KERN_SUCCESS
|| !(cur_prot
& VM_PROT_EXECUTE
))
235 vm_deallocate (mach_task_self (), config_page
, PAGE_MAX_SIZE
* 2);
239 /* We have valid trampoline and config pages */
240 table
= calloc (1, sizeof (ffi_trampoline_table
));
241 table
->free_count
= FFI_TRAMPOLINE_COUNT
;
242 table
->config_page
= config_page
;
244 /* Create and initialize the free list */
245 table
->free_list_pool
=
246 calloc (FFI_TRAMPOLINE_COUNT
, sizeof (ffi_trampoline_table_entry
));
248 for (i
= 0; i
< table
->free_count
; i
++)
250 ffi_trampoline_table_entry
*entry
= &table
->free_list_pool
[i
];
252 (void *) (trampoline_page
+ (i
* FFI_TRAMPOLINE_SIZE
));
254 entry
->trampoline
= ptrauth_sign_unauthenticated(entry
->trampoline
, ptrauth_key_function_pointer
, 0);
257 if (i
< table
->free_count
- 1)
258 entry
->next
= &table
->free_list_pool
[i
+ 1];
261 table
->free_list
= table
->free_list_pool
;
267 ffi_trampoline_table_free (ffi_trampoline_table
*table
)
269 /* Remove from the list */
270 if (table
->prev
!= NULL
)
271 table
->prev
->next
= table
->next
;
273 if (table
->next
!= NULL
)
274 table
->next
->prev
= table
->prev
;
276 /* Deallocate pages */
277 vm_deallocate (mach_task_self (), table
->config_page
, PAGE_MAX_SIZE
* 2);
279 /* Deallocate free list */
280 free (table
->free_list_pool
);
285 ffi_closure_alloc (size_t size
, void **code
)
287 /* Create the closure */
288 ffi_closure
*closure
= malloc (size
);
292 pthread_mutex_lock (&ffi_trampoline_lock
);
294 /* Check for an active trampoline table with available entries. */
295 ffi_trampoline_table
*table
= ffi_trampoline_tables
;
296 if (table
== NULL
|| table
->free_list
== NULL
)
298 table
= ffi_trampoline_table_alloc ();
301 pthread_mutex_unlock (&ffi_trampoline_lock
);
306 /* Insert the new table at the top of the list */
307 table
->next
= ffi_trampoline_tables
;
308 if (table
->next
!= NULL
)
309 table
->next
->prev
= table
;
311 ffi_trampoline_tables
= table
;
314 /* Claim the free entry */
315 ffi_trampoline_table_entry
*entry
= ffi_trampoline_tables
->free_list
;
316 ffi_trampoline_tables
->free_list
= entry
->next
;
317 ffi_trampoline_tables
->free_count
--;
320 pthread_mutex_unlock (&ffi_trampoline_lock
);
322 /* Initialize the return values */
323 *code
= entry
->trampoline
;
324 closure
->trampoline_table
= table
;
325 closure
->trampoline_table_entry
= entry
;
331 ffi_closure_free (void *ptr
)
333 ffi_closure
*closure
= ptr
;
335 pthread_mutex_lock (&ffi_trampoline_lock
);
337 /* Fetch the table and entry references */
338 ffi_trampoline_table
*table
= closure
->trampoline_table
;
339 ffi_trampoline_table_entry
*entry
= closure
->trampoline_table_entry
;
341 /* Return the entry to the free list */
342 entry
->next
= table
->free_list
;
343 table
->free_list
= entry
;
346 /* If all trampolines within this table are free, and at least one other table exists, deallocate
348 if (table
->free_count
== FFI_TRAMPOLINE_COUNT
349 && ffi_trampoline_tables
!= table
)
351 ffi_trampoline_table_free (table
);
353 else if (ffi_trampoline_tables
!= table
)
355 /* Otherwise, bump this table to the top of the list */
357 table
->next
= ffi_trampoline_tables
;
358 if (ffi_trampoline_tables
!= NULL
)
359 ffi_trampoline_tables
->prev
= table
;
361 ffi_trampoline_tables
= table
;
364 pthread_mutex_unlock (&ffi_trampoline_lock
);
366 /* Free the closure */
372 // Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations.
374 #elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
377 #define USE_DL_PREFIX 1
379 #ifndef USE_BUILTIN_FFS
380 #define USE_BUILTIN_FFS 1
384 /* We need to use mmap, not sbrk. */
385 #define HAVE_MORECORE 0
387 /* We could, in theory, support mremap, but it wouldn't buy us anything. */
388 #define HAVE_MREMAP 0
390 /* We have no use for this, so save some code and data. */
391 #define NO_MALLINFO 1
393 /* We need all allocations to be in regular segments, otherwise we
394 lose track of the corresponding code address. */
395 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
397 /* Don't allocate more than a page unless needed. */
398 #define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize)
400 #include <sys/types.h>
401 #include <sys/stat.h>
412 #endif /* HAVE_MNTENT */
413 #include <sys/param.h>
416 /* We don't want sys/mman.h to be included after we redefine mmap and
418 #include <sys/mman.h>
419 #define LACKS_SYS_MMAN_H 1
421 #if FFI_MMAP_EXEC_SELINUX
422 #include <sys/statfs.h>
/* Cached tri-state: -1 unknown, 0 disabled, 1 enabled.  */
static int selinux_enabled = -1;

/* Detect whether SELinux is active: first probe the conventional
   /selinux mount for the selinuxfs magic number, then scan
   /proc/mounts for a mounted selinuxfs.  Returns nonzero if found.  */
static int
selinux_enabled_check (void)
{
  struct statfs sfs;
  FILE *f;
  char *buf = NULL;
  size_t len = 0;

  /* 0xf97cff8c is the selinuxfs filesystem magic.  */
  if (statfs ("/selinux", &sfs) >= 0
      && (unsigned int) sfs.f_type == 0xf97cff8cU)
    return 1;

  f = fopen ("/proc/mounts", "r");
  if (f == NULL)
    return 0;

  while (getline (&buf, &len, f) >= 0)
    {
      /* Each line reads "device dir type ..."; the type is the third
	 field, after the second space.  */
      char *p = strchr (buf, ' ');
      if (p == NULL)
	break;
      p = strchr (p + 1, ' ');
      if (p == NULL)
	break;
      if (strncmp (p + 1, "selinuxfs ", 10) == 0)
	{
	  free (buf);
	  fclose (f);
	  return 1;
	}
    }
  free (buf);
  fclose (f);
  return 0;
}

#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \
			      : (selinux_enabled = selinux_enabled_check ()))
466 #define is_selinux_enabled() 0
468 #endif /* !FFI_MMAP_EXEC_SELINUX */
470 /* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */
471 #ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
/* Cached tri-state: -1 unknown, 0 disabled, 1 enabled.  */
static int emutramp_enabled = -1;

/* Parse the "PaX:" flags line of /proc/self/status and report whether
   trampoline emulation ('E') is enabled for this process.  */
static int
emutramp_enabled_check (void)
{
  char *buf = NULL;
  size_t len = 0;
  FILE *f;
  int ret;
  f = fopen ("/proc/self/status", "r");
  if (f == NULL)
    return 0;
  ret = 0;

  while (getline (&buf, &len, f) != -1)
    if (!strncmp (buf, "PaX:", 4))
      {
	char emutramp;
	/* Second flag character of the PaX field is 'E' when
	   emulation is on.  */
	if (sscanf (buf, "%*s %*c%c", &emutramp) == 1)
	  ret = (emutramp == 'E');
	break;
      }
  free (buf);
  fclose (f);
  return ret;
}

#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
			       : (emutramp_enabled = emutramp_enabled_check ()))
503 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
505 #elif defined (__CYGWIN__) || defined(__INTERIX)
507 #include <sys/mman.h>
509 /* Cygwin is Linux-like, but not quite that Linux-like. */
510 #define is_selinux_enabled() 0
512 #endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
514 #ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
515 #define is_emutramp_enabled() 0
516 #endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
518 /* Declare all functions defined in dlmalloc.c as static. */
519 static void *dlmalloc(size_t);
520 static void dlfree(void*);
521 static void *dlcalloc(size_t, size_t) MAYBE_UNUSED
;
522 static void *dlrealloc(void *, size_t) MAYBE_UNUSED
;
523 static void *dlmemalign(size_t, size_t) MAYBE_UNUSED
;
524 static void *dlvalloc(size_t) MAYBE_UNUSED
;
525 static int dlmallopt(int, int) MAYBE_UNUSED
;
526 static size_t dlmalloc_footprint(void) MAYBE_UNUSED
;
527 static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED
;
528 static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED
;
529 static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED
;
530 static void *dlpvalloc(size_t) MAYBE_UNUSED
;
531 static int dlmalloc_trim(size_t) MAYBE_UNUSED
;
532 static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED
;
533 static void dlmalloc_stats(void) MAYBE_UNUSED
;
535 #if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
536 /* Use these for mmap and munmap within dlmalloc.c. */
537 static void *dlmmap(void *, size_t, int, int, int, off_t
);
538 static int dlmunmap(void *, size_t);
539 #endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
542 #define munmap dlmunmap
544 #include "dlmalloc.c"
549 #if !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
551 /* A mutex used to synchronize access to *exec* variables in this file. */
552 static pthread_mutex_t open_temp_exec_file_mutex
= PTHREAD_MUTEX_INITIALIZER
;
554 /* A file descriptor of a temporary file from which we'll map
556 static int execfd
= -1;
558 /* The amount of space already allocated from the temporary file. */
559 static size_t execsize
= 0;
561 #ifdef HAVE_MEMFD_CREATE
562 /* Open a temporary file name, and immediately unlink it. */
564 open_temp_exec_file_memfd (const char *name
)
567 fd
= memfd_create (name
, MFD_CLOEXEC
);
572 /* Open a temporary file name, and immediately unlink it. */
static int
open_temp_exec_file_name (char *name, int flags)
{
  int fd;

#ifdef HAVE_MKOSTEMP
  fd = mkostemp (name, flags);
#else
  fd = mkstemp (name);
#endif

  /* Unlink immediately so the file disappears as soon as every
     descriptor and mapping referencing it is gone.  */
  if (fd != -1)
    unlink (name);

  return fd;
}
590 /* Open a temporary file in the named directory. */
static int
open_temp_exec_file_dir (const char *dir)
{
  static const char suffix[] = "/ffiXXXXXX";
  int lendir, flags;
  char *tempname;
#ifdef O_TMPFILE
  int fd;
#endif

#ifdef O_CLOEXEC
  flags = O_CLOEXEC;
#else
  flags = 0;
#endif

#ifdef O_TMPFILE
  /* Prefer an unnamed temporary file when the kernel supports it.  */
  fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700);
  /* If the running system does not support the O_TMPFILE flag then retry without it. */
  if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP))
    {
      return fd;
    }
  else
    errno = 0;
#endif

  lendir = (int) strlen (dir);
  tempname = __builtin_alloca (lendir + sizeof (suffix));

  if (!tempname)
    return -1;

  memcpy (tempname, dir, lendir);
  memcpy (tempname + lendir, suffix, sizeof (suffix));

  return open_temp_exec_file_name (tempname, flags);
}
629 /* Open a temporary file in the directory in the named environment
/* Open a temporary file in the directory named by environment
   variable ENVVAR; -1 when the variable is unset.  */
static int
open_temp_exec_file_env (const char *envvar)
{
  const char *value = getenv (envvar);

  if (!value)
    return -1;

  return open_temp_exec_file_dir (value);
}
643 /* Open a temporary file in an executable and writable mount point
644 listed in the mounts file. Subsequent calls with the same mounts
645 keep searching for mount points in the same file. Providing NULL
646 as the mounts file closes the file. */
648 open_temp_exec_file_mnt (const char *mounts
)
650 static const char *last_mounts
;
651 static FILE *last_mntent
;
653 if (mounts
!= last_mounts
)
656 endmntent (last_mntent
);
658 last_mounts
= mounts
;
661 last_mntent
= setmntent (mounts
, "r");
673 char buf
[MAXPATHLEN
* 3];
675 if (getmntent_r (last_mntent
, &mnt
, buf
, sizeof (buf
)) == NULL
)
678 if (hasmntopt (&mnt
, "ro")
679 || hasmntopt (&mnt
, "noexec")
680 || access (mnt
.mnt_dir
, W_OK
))
683 fd
= open_temp_exec_file_dir (mnt
.mnt_dir
);
689 #endif /* HAVE_MNTENT */
691 /* Instructions to look for a location to hold a temporary file that
692 can be mapped in for execution. */
695 int (*func
)(const char *);
698 } open_temp_exec_file_opts
[] = {
699 #ifdef HAVE_MEMFD_CREATE
700 { open_temp_exec_file_memfd
, "libffi", 0 },
702 { open_temp_exec_file_env
, "LIBFFI_TMPDIR", 0 },
703 { open_temp_exec_file_env
, "TMPDIR", 0 },
704 { open_temp_exec_file_dir
, "/tmp", 0 },
705 { open_temp_exec_file_dir
, "/var/tmp", 0 },
706 { open_temp_exec_file_dir
, "/dev/shm", 0 },
707 { open_temp_exec_file_env
, "HOME", 0 },
709 { open_temp_exec_file_mnt
, "/etc/mtab", 1 },
710 { open_temp_exec_file_mnt
, "/proc/mounts", 1 },
711 #endif /* HAVE_MNTENT */
714 /* Current index into open_temp_exec_file_opts. */
715 static int open_temp_exec_file_opts_idx
= 0;
717 /* Reset a current multi-call func, then advances to the next entry.
718 If we're at the last, go back to the first and return nonzero,
719 otherwise return zero. */
721 open_temp_exec_file_opts_next (void)
723 if (open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].repeat
)
724 open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].func (NULL
);
726 open_temp_exec_file_opts_idx
++;
727 if (open_temp_exec_file_opts_idx
728 == (sizeof (open_temp_exec_file_opts
)
729 / sizeof (*open_temp_exec_file_opts
)))
731 open_temp_exec_file_opts_idx
= 0;
738 /* Return a file descriptor of a temporary zero-sized file in a
739 writable and executable filesystem. */
741 open_temp_exec_file (void)
747 fd
= open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].func
748 (open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].arg
);
750 if (!open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].repeat
753 if (open_temp_exec_file_opts_next ())
762 /* We need to allocate space in a file that will be backing a writable
763 mapping. Several problems exist with the usual approaches:
764 - fallocate() is Linux-only
765 - posix_fallocate() is not available on all platforms
766 - ftruncate() does not allocate space on filesystems with sparse files
767 Failure to allocate the space will cause SIGBUS to be thrown when
768 the mapping is subsequently written to. */
static int
allocate_space (int fd, off_t offset, off_t len)
{
  static size_t page_size;

  (void) offset;  /* file position is assumed to already be at OFFSET */

  /* Obtain system page size. */
  if (!page_size)
    page_size = sysconf(_SC_PAGESIZE);

  unsigned char buf[page_size];
  memset (buf, 0, page_size);

  while (len > 0)
    {
      /* Cast page_size so the comparison is not signed/unsigned mixed.  */
      off_t to_write = (len < (off_t) page_size) ? len : (off_t) page_size;
      if (write (fd, buf, to_write) < to_write)
	return -1;
      len -= to_write;
    }

  return 0;
}
792 /* Map in a chunk of memory from the temporary exec file into separate
793 locations in the virtual memory address space, one writable and one
794 executable. Returns the address of the writable portion, after
795 storing an offset to the corresponding executable portion at the
796 last word of the requested chunk. */
798 dlmmap_locked (void *start
, size_t length
, int prot
, int flags
, off_t offset
)
804 open_temp_exec_file_opts_idx
= 0;
806 execfd
= open_temp_exec_file ();
813 if (allocate_space (execfd
, offset
, length
))
816 flags
&= ~(MAP_PRIVATE
| MAP_ANONYMOUS
);
819 ptr
= mmap (NULL
, length
, (prot
& ~PROT_WRITE
) | PROT_EXEC
,
820 flags
, execfd
, offset
);
828 if (ftruncate (execfd
, offset
) != 0)
830 /* Fixme : Error logs can be added here. Returning an error for
831 * ftruncte() will not add any advantage as it is being
832 * validating in the error case. */
838 && open_temp_exec_file_opts
[open_temp_exec_file_opts_idx
].repeat
)
839 open_temp_exec_file_opts_next ();
841 start
= mmap (start
, length
, prot
, flags
, execfd
, offset
);
845 munmap (ptr
, length
);
846 if (ftruncate (execfd
, offset
) != 0)
848 /* Fixme : Error logs can be added here. Returning an error for
849 * ftruncte() will not add any advantage as it is being
850 * validating in the error case. */
855 mmap_exec_offset ((char *)start
, length
) = (char*)ptr
- (char*)start
;
862 /* Map in a writable and executable chunk of memory if possible.
863 Failing that, fall back to dlmmap_locked. */
865 dlmmap (void *start
, size_t length
, int prot
,
866 int flags
, int fd
, off_t offset
)
870 assert (start
== NULL
&& length
% malloc_getpagesize
== 0
871 && prot
== (PROT_READ
| PROT_WRITE
)
872 && flags
== (MAP_PRIVATE
| MAP_ANONYMOUS
)
873 && fd
== -1 && offset
== 0);
875 if (execfd
== -1 && ffi_tramp_is_supported ())
877 ptr
= mmap (start
, length
, prot
& ~PROT_EXEC
, flags
, fd
, offset
);
881 if (execfd
== -1 && is_emutramp_enabled ())
883 ptr
= mmap (start
, length
, prot
& ~PROT_EXEC
, flags
, fd
, offset
);
887 if (execfd
== -1 && !is_selinux_enabled ())
889 ptr
= mmap (start
, length
, prot
| PROT_EXEC
, flags
, fd
, offset
);
891 if (ptr
!= MFAIL
|| (errno
!= EPERM
&& errno
!= EACCES
))
892 /* Cool, no need to mess with separate segments. */
895 /* If MREMAP_DUP is ever introduced and implemented, try mmap
896 with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with
897 MREMAP_DUP and prot at this point. */
900 if (execsize
== 0 || execfd
== -1)
902 pthread_mutex_lock (&open_temp_exec_file_mutex
);
903 ptr
= dlmmap_locked (start
, length
, prot
, flags
, offset
);
904 pthread_mutex_unlock (&open_temp_exec_file_mutex
);
909 return dlmmap_locked (start
, length
, prot
, flags
, offset
);
912 /* Release memory at the given address, as well as the corresponding
913 executable page if it's separate. */
915 dlmunmap (void *start
, size_t length
)
917 /* We don't bother decreasing execsize or truncating the file, since
918 we can't quite tell whether we're unmapping the end of the file.
919 We don't expect frequent deallocation anyway. If we did, we
920 could locate pages in the file by writing to the pages being
921 deallocated and checking that the file contents change.
923 msegmentptr seg
= segment_holding (gm
, start
);
926 if (seg
&& (code
= add_segment_exec_offset (start
, seg
)) != start
)
928 int ret
= munmap (code
, length
);
933 return munmap (start
, length
);
#if FFI_CLOSURE_FREE_CODE
/* Return segment holding given code address.  */
static msegmentptr
segment_holding_code (mstate m, char* addr)
{
  msegmentptr sp = &m->seg;
  for (;;)
    {
      /* Compare against the executable alias of each segment.  */
      if (addr >= add_segment_exec_offset (sp->base, sp)
	  && addr < add_segment_exec_offset (sp->base, sp) + sp->size)
	return sp;
      if ((sp = sp->next) == 0)
	return 0;
    }
}
#endif /* FFI_CLOSURE_FREE_CODE */
952 #endif /* !(defined(_WIN32) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
954 /* Allocate a chunk of memory with the given size. Returns a pointer
955 to the writable address, and sets *CODE to the executable
956 corresponding virtual address. */
958 ffi_closure_alloc (size_t size
, void **code
)
965 ptr
= FFI_CLOSURE_PTR (dlmalloc (size
));
969 msegmentptr seg
= segment_holding (gm
, ptr
);
971 *code
= add_segment_exec_offset (ptr
, seg
);
972 if (!ffi_tramp_is_supported ())
975 ftramp
= ffi_tramp_alloc (0);
978 dlfree (FFI_RESTORE_PTR (ptr
));
981 *code
= ffi_tramp_get_addr (ftramp
);
982 ((ffi_closure
*) ptr
)->ftramp
= ftramp
;
989 ffi_data_to_code_pointer (void *data
)
991 msegmentptr seg
= segment_holding (gm
, data
);
992 /* We expect closures to be allocated with ffi_closure_alloc(), in
993 which case seg will be non-NULL. However, some users take on the
994 burden of managing this memory themselves, in which case this
995 we'll just return data. */
998 if (!ffi_tramp_is_supported ())
999 return add_segment_exec_offset (data
, seg
);
1000 return ffi_tramp_get_addr (((ffi_closure
*) data
)->ftramp
);
1006 /* Release a chunk of memory allocated with ffi_closure_alloc. If
1007 FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the
1008 writable or the executable address given. Otherwise, only the
1009 writable address can be provided here. */
1011 ffi_closure_free (void *ptr
)
1013 #if FFI_CLOSURE_FREE_CODE
1014 msegmentptr seg
= segment_holding_code (gm
, ptr
);
1017 ptr
= sub_segment_exec_offset (ptr
, seg
);
1019 if (ffi_tramp_is_supported ())
1020 ffi_tramp_free (((ffi_closure
*) ptr
)->ftramp
);
1022 dlfree (FFI_RESTORE_PTR (ptr
));
1026 ffi_tramp_is_present (void *ptr
)
1028 msegmentptr seg
= segment_holding (gm
, ptr
);
1029 return seg
!= NULL
&& ffi_tramp_is_supported();
1032 # else /* ! FFI_MMAP_EXEC_WRIT */
1034 /* On many systems, memory returned by malloc is writable and
1035 executable, so just use it. */
/* malloc memory is already executable here; the data and code
   addresses coincide.  */
void *
ffi_closure_alloc (size_t size, void **code)
{
  if (!code)
    return NULL;

  return *code = FFI_CLOSURE_PTR (malloc (size));
}
/* Release a closure allocated by the plain-malloc ffi_closure_alloc.  */
void
ffi_closure_free (void *ptr)
{
  free (FFI_RESTORE_PTR (ptr));
}
/* Memory here is writable and executable, so the data address is the
   code address.  */
void *
ffi_data_to_code_pointer (void *data)
{
  return data;
}
/* Static trampolines are not used in the plain-malloc configuration.  */
int
ffi_tramp_is_present (__attribute__((unused)) void *ptr)
{
  return 0;
}
1066 # endif /* ! FFI_MMAP_EXEC_WRIT */
1067 #endif /* FFI_CLOSURES */
1069 #endif /* NetBSD with PROT_MPROTECT */