/* Copyright (C) 2021-2023 Free Software Foundation, Inc.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
/*
 * incorporating former "loadobjects" into more general "map"
 * (including code and data segments and dynamic functions)
 */

#include <sys/param.h>

#include "collector.h"
#include "gp-experiment.h"
/*
 * These are obsolete and unreliable.
 * They are included here only for historical compatibility.
 */
#define MA_SHARED  0x08   /* changes are shared by mapped object */
#define MA_ANON    0x40   /* anonymous memory (e.g. /dev/zero) */
#define MA_ISM     0x80   /* intimate shared mem (shared MMU resources) */
#define MA_BREAK   0x10   /* grown by brk(2) */
#define MA_STACK   0x20   /* grown automatically on stack faults */
typedef struct prmap_t
{
  unsigned long pr_vaddr;     /* virtual address of mapping */
  unsigned long pr_size;      /* size of mapping in bytes */
  char *pr_mapname;           /* name in /proc/<pid>/object */
  int pr_mflags;              /* protection and attribute flags (see below) */
  unsigned long pr_offset;    /* offset into mapped object, if any */
  unsigned long pr_dev;
  unsigned long pr_ino;
  int pr_pagesize;            /* pagesize (bytes) for this mapping */
} prmap_t;
/* TprintfT(<level>,...) definitions.  Adjust per module as needed */
#define DBG_LT0 0 // for high-level configuration, unexpected errors/warnings
#define DBG_LT1 1 // for configuration details, warnings
#define DBG_LT2 2

#define SYS_MMAP_NAME     "mmap"
#define SYS_MMAP64_NAME   "mmap64"
#define SYS_MUNMAP_NAME   "munmap"
#define SYS_DLOPEN_NAME   "dlopen"
#define SYS_DLCLOSE_NAME  "dlclose"
typedef struct MapInfo
{
  struct MapInfo *next;
  unsigned long vaddr;
  unsigned long size;
  char *mapname;              /* name in /proc/<pid>/object */
  char *filename;
  unsigned long offset;
  int mflags;
  int pagesize;
} MapInfo;

typedef struct NameInfo
{
  struct NameInfo *next;
  char *mapname;
  char filename[1];           /* dynamic length file name */
} NameInfo;
static NameInfo *namemaps = NULL;
static MapInfo mmaps;                   /* current memory maps */
static struct DataHandle *map_hndl = NULL;
static char dyntext_fname[MAXPATHLEN];
static void *mapcache = NULL;
static char *maptext = NULL;
static size_t maptext_sz = 4096;        /* initial buffer size */
static int mmap_mode = 0;
static int mmap_initted = 0;
static collector_mutex_t map_lock = COLLECTOR_MUTEX_INITIALIZER;
static collector_mutex_t dyntext_lock = COLLECTOR_MUTEX_INITIALIZER;
/* A reentrance guard for the interposition functions ensures that updates to
   the map cache/file are sequential, with the first doing the final update.  */
static int reentrance = 0;
#define CHCK_REENTRANCE  (reentrance || mmap_mode <= 0)
#define CURR_REENTRANCE  reentrance
#define PUSH_REENTRANCE  reentrance++
#define POP_REENTRANCE   reentrance--

#define CALL_REAL(x)    (__real_##x)
#define NULL_PTR(x)     (__real_##x == NULL)
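/* Illustrative sketch (not an additional code path in this file): the guard
   macros above are meant to be used around a map update, as the interposed
   dlclose() below does, so that nested interposed calls triggered while the
   update is in progress do not recurse into another update:

     if (!CHCK_REENTRANCE)
       {
         PUSH_REENTRANCE;
         update_map_segments (GETRELTIME (), 1);
         POP_REENTRANCE;
       }
 */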
/* interposition function handles */
static void *(*__real_mmap) (void* start, size_t length, int prot, int flags,
			     int fd, off_t offset) = NULL;
static void *(*__real_mmap64) (void* start, size_t length, int prot, int flags,
			       int fd, off64_t offset) = NULL;
static int (*__real_munmap) (void* start, size_t length) = NULL;
static void *(*__real_dlopen) (const char* pathname, int mode) = NULL;
#if (ARCH(Intel) && WSIZE(32)) || ARCH(SPARC)
static void *(*__real_dlopen_2_1) (const char* pathname, int mode) = NULL;
static void *(*__real_dlopen_2_0) (const char* pathname, int mode) = NULL;
#endif
static int (*__real_dlclose) (void* handle) = NULL;
static void (*collector_heap_record) (int, size_t, void*) = NULL;
/* internal function prototypes */
static int init_mmap_intf ();
static int init_mmap_files ();
static void append_segment_record (char *format, ...);
static void update_map_segments (hrtime_t hrt, int resolve);
static void resolve_mapname (MapInfo *map, char *name);
static void record_segment_map (hrtime_t timestamp, uint64_t loadaddr,
				unsigned long msize, int pagesize, int modeflags,
				long long offset, unsigned check, char *name);
static void record_segment_unmap (hrtime_t timestamp, uint64_t loadaddr);

/* Linux needs handling of the vsyscall page to get its data into the map.xml file */
static void process_vsyscall_page ();

#define MAXVSYSFUNCS 10
static int nvsysfuncs = 0;
static char *sysfuncname[MAXVSYSFUNCS];
static uint64_t sysfuncvaddr[MAXVSYSFUNCS];
static unsigned long sysfuncsize[MAXVSYSFUNCS];

static char *dynname[MAXDYN];
static void *dynvaddr[MAXDYN];
static unsigned dynsize[MAXDYN];
static char *dynfuncname[MAXDYN];
/*===================================================================*/

/*
 * void __collector_mmap_init_mutex_locks()
 *      Initialize mmap mutex locks.
 */
void
__collector_mmap_init_mutex_locks ()
{
  __collector_mutex_init (&map_lock);
  __collector_mutex_init (&dyntext_lock);
}
/* __collector_ext_update_map_segments is called by the audit agent.
 * It is also called by dbx/collector when a (possible) map update
 * is intimated, such as after dlopen/dlclose.
 * Required when libcollector.so is not preloaded and interpositions are inactive.
 */
int
__collector_ext_update_map_segments (void)
{
  TprintfT (0, "__collector_ext_update_map_segments(%d)\n", CURR_REENTRANCE);
  if (CHCK_REENTRANCE)
    return 0;
  PUSH_REENTRANCE;
  update_map_segments (GETRELTIME (), 1);
  POP_REENTRANCE;
  return 0;
}
/*
 * int __collector_ext_mmap_install()
 *      Install and initialise mmap tracing.
 */
int
__collector_ext_mmap_install (int record)
{
  TprintfT (0, "__collector_ext_mmap_install(mmap_mode=%d)\n", mmap_mode);
  if (NULL_PTR (mmap))
    {
      if (init_mmap_intf ())
	{
	  TprintfT (0, "ERROR: collector mmap tracing initialization failed.\n");
	  return COL_ERROR_EXPOPEN;
	}
    }
  else
    TprintfT (DBG_LT2, "collector mmap tracing: mmap pointer not null\n");

  /* Initialize the side-door interface with the heap tracing module */
  collector_heap_record = (void(*)(int, size_t, void*))dlsym (RTLD_DEFAULT,
							      "__collector_heap_record");
  map_hndl = __collector_create_handle (SP_MAP_FILE);
  if (map_hndl == NULL)
    return COL_ERROR_MAPOPEN;
  if (init_mmap_files ())
    {
      TprintfT (0, "ERROR: collector init_mmap_files() failed.\n");
      return COL_ERROR_EXPOPEN;
    }

  mmap_mode = record;
  mmap_initted = 1;
  PUSH_REENTRANCE;
  update_map_segments (GETRELTIME (), 1); // initial map
  POP_REENTRANCE;
  process_vsyscall_page ();
  return COL_ERROR_NONE;
}
/*
 * int __collector_ext_mmap_deinstall()
 *      Optionally update the final map and stop tracing mmap events.
 */
int
__collector_ext_mmap_deinstall (int update)
{
  if (!mmap_initted)
    return COL_ERROR_NONE;
  mmap_mode = 0;
  if (update)
    update_map_segments (GETRELTIME (), 1);
  TprintfT (0, "__collector_ext_mmap_deinstall(%d)\n", update);
  if (map_hndl != NULL)
    {
      __collector_delete_handle (map_hndl);
      map_hndl = NULL;
    }
  __collector_mutex_lock (&map_lock); // get lock before resetting

  /* Free all memory maps */
  MapInfo *mp;
  for (mp = mmaps.next; mp;)
    {
      MapInfo *next = mp->next;
      __collector_freeCSize (__collector_heap, mp, sizeof (*mp));
      mp = next;
    }
  mmaps.next = NULL;

  /* Free all name maps */
  NameInfo *np;
  for (np = namemaps; np;)
    {
      NameInfo *next = np->next;
      __collector_freeCSize (__collector_heap, np, sizeof (*np) + __collector_strlen (np->filename));
      np = next;
    }
  namemaps = NULL;
  mapcache = __collector_reallocVSize (__collector_heap, mapcache, 0);
  __collector_mutex_unlock (&map_lock);
  TprintfT (0, "__collector_ext_mmap_deinstall done\n");
  return COL_ERROR_NONE;
}
/*
 * void __collector_mmap_fork_child_cleanup()
 *      Perform all necessary cleanup steps in the child process after fork().
 */
void
__collector_mmap_fork_child_cleanup ()
{
  /* Initialize all mmap "mutex" locks */
  __collector_mmap_init_mutex_locks ();
  if (!mmap_initted)
    return;
  __collector_delete_handle (map_hndl);
  __collector_mutex_lock (&map_lock); // get lock before resetting

  /* Free all memory maps */
  MapInfo *mp;
  for (mp = mmaps.next; mp;)
    {
      MapInfo *next = mp->next;
      __collector_freeCSize (__collector_heap, mp, sizeof (*mp));
      mp = next;
    }
  mmaps.next = NULL;

  /* Free all name maps */
  NameInfo *np;
  for (np = namemaps; np;)
    {
      NameInfo *next = np->next;
      __collector_freeCSize (__collector_heap, np, sizeof (*np) + __collector_strlen (np->filename));
      np = next;
    }
  namemaps = NULL;
  mapcache = __collector_reallocVSize (__collector_heap, mapcache, 0);
  __collector_mutex_unlock (&map_lock);
}
static int
init_mmap_files ()
{
  TprintfT (DBG_LT2, "init_mmap_files\n");
  /* also create the headerless dyntext file (if required) */
  CALL_UTIL (snprintf)(dyntext_fname, sizeof (dyntext_fname), "%s/%s",
		       __collector_exp_dir_name, SP_DYNTEXT_FILE);
  if (CALL_UTIL (access)(dyntext_fname, F_OK) != 0)
    {
      int fd = CALL_UTIL (open)(dyntext_fname, O_RDWR | O_CREAT | O_TRUNC,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
      if (fd < 0)
	{
	  TprintfT (0, "ERROR: init_mmap_files: open(%s) failed\n",
		    dyntext_fname);
	  __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s: %s</event>\n",
				 SP_JCMD_CERROR, COL_ERROR_DYNOPEN, errno,
				 dyntext_fname, errmsg);
	  return COL_ERROR_DYNOPEN;
	}
      CALL_UTIL (close)(fd);
    }
  return COL_ERROR_NONE;
}
static void
append_segment_record (char *format, ...)
{
  char buf[1024];
  char *bufptr = buf;
  va_list va;
  va_start (va, format);
  int sz = __collector_xml_vsnprintf (bufptr, sizeof (buf), format, va);
  va_end (va);

  if (__collector_expstate != EXP_OPEN && __collector_expstate != EXP_PAUSED)
    {
      TprintfT (0, "append_segment_record: expt neither open nor paused (%d); "
		"not writing to map.xml\n\t%s", __collector_expstate, buf);
      return;
    }
  if (sz >= sizeof (buf))
    {
      /* Allocate a new buffer */
      sz += 1;              /* add the terminating null byte */
      bufptr = (char*) alloca (sz);
      va_start (va, format);
      sz = __collector_xml_vsnprintf (bufptr, sz, format, va);
      va_end (va);
    }
  int rc = __collector_write_string (map_hndl, bufptr, sz);
  if (rc != 0)
    (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\"></event>\n",
				  SP_JCMD_CERROR, COL_ERROR_MAPWRITE);
}
static void
record_segment_map (hrtime_t timestamp, uint64_t loadaddr, unsigned long msize,
		    int pagesize, int modeflags, long long offset,
		    unsigned check, char *name)
{
  TprintfT (DBG_LT2, "record_segment_map(%s @ 0x%llx)\n", name, (long long) loadaddr);
  append_segment_record ("<event kind=\"map\" object=\"segment\" tstamp=\"%u.%09u\" "
			 "vaddr=\"0x%016llX\" size=\"%lu\" pagesz=\"%d\" foffset=\"%c0x%08llX\" "
			 "modes=\"0x%03X\" chksum=\"0x%0X\" name=\"%s\"/>\n",
			 (unsigned) (timestamp / NANOSEC),
			 (unsigned) (timestamp % NANOSEC),
			 loadaddr, msize, pagesize,
			 offset < 0 ? '-' : '+', offset < 0 ? -offset : offset,
			 modeflags, check, name);
}
static void
record_segment_unmap (hrtime_t timestamp, uint64_t loadaddr)
{
  TprintfT (DBG_LT2, "record_segment_unmap(@ 0x%llx)\n", (long long) loadaddr);
  append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" vaddr=\"0x%016llX\"/>\n",
			 (unsigned) (timestamp / NANOSEC),
			 (unsigned) (timestamp % NANOSEC), loadaddr);
}
#if WSIZE(64)
#define ELF_EHDR        Elf64_Ehdr
#define ELF_PHDR        Elf64_Phdr
#define ELF_SHDR        Elf64_Shdr
#define ELF_DYN         Elf64_Dyn
#define ELF_AUX         Elf64_auxv_t
#define ELF_SYM         Elf64_Sym
#define ELF_ST_BIND     ELF64_ST_BIND
#define ELF_ST_TYPE     ELF64_ST_TYPE
#elif WSIZE(32)
#define ELF_EHDR        Elf32_Ehdr
#define ELF_PHDR        Elf32_Phdr
#define ELF_SHDR        Elf32_Shdr
#define ELF_DYN         Elf32_Dyn
#define ELF_AUX         Elf32_auxv_t
#define ELF_SYM         Elf32_Sym
#define ELF_ST_BIND     ELF32_ST_BIND
#define ELF_ST_TYPE     ELF32_ST_TYPE
#endif
static unsigned
checksum_mapname (MapInfo *map)
{
  unsigned checksum = 0;
  /* only checksum code segments */
  if ((map->mflags & (PROT_EXEC | PROT_READ)) == 0 ||
      (map->mflags & PROT_WRITE) != 0)
    return 0;
  checksum = (unsigned) -1;
  TprintfT (DBG_LT2, "checksum_mapname checksum = 0x%0X\n", checksum);
  return checksum;
}
static void *
dlopen_searchpath (void*(real_dlopen) (const char *, int),
		   void *caller_addr, const char *basename, int mode)
{
  Dl_info dl_info;
  TprintfT (DBG_LT2, "dlopen_searchpath(%p, %s, %d)\n", caller_addr, basename, mode);
  if (dladdr (caller_addr, &dl_info) == 0)
    {
      TprintfT (0, "ERROR: dladdr(%p): %s\n", caller_addr, dlerror ());
      return NULL;
    }
  TprintfT (DBG_LT2, "dladdr(%p): %p fname=%s\n",
	    caller_addr, dl_info.dli_fbase, dl_info.dli_fname);
  int noload = RTLD_LAZY | RTLD_NOW | RTLD_NOLOAD;
  void *caller_hndl = NULL;
#define WORKAROUND_RTLD_BUG 1
#ifdef WORKAROUND_RTLD_BUG
  // A dynamic linker dlopen bug can result in corruption/closure of open streams
  // XXXX workaround should be removed once linker patches are all available
#if WSIZE(64)
#define MAINBASE 0x400000
#elif WSIZE(32)
#define MAINBASE 0x08048000
#endif
  const char* tmp_path =
	  (dl_info.dli_fbase == (void*) MAINBASE) ? NULL : dl_info.dli_fname;
  caller_hndl = real_dlopen (tmp_path, noload);
#else //XXXX workaround should be removed once linker patches are all available
  caller_hndl = real_dlopen (dl_info.dli_fname, noload);
#endif //XXXX workaround should be removed once linker patches are all available
  if (caller_hndl == NULL)
    {
      TprintfT (0, "ERROR: dlopen(%s,NOLOAD): %s\n", dl_info.dli_fname, dlerror ());
      return NULL;
    }
#if !defined(__MUSL_LIBC)
  Dl_serinfo _info, *info = &_info;
  Dl_serpath *path;

  /* determine search path count and required buffer size */
  dlinfo (caller_hndl, RTLD_DI_SERINFOSIZE, (void *) info);

  /* allocate new buffer and initialize */
  /*
     There is a bug in Linux that causes the first call
     to dlinfo() to return a small value for dls_size.

     The first call to dlinfo() determines the search path
     count and the required buffer size.  The second call to
     dlinfo() tries to obtain the search path information.

     However, the buffer size returned by the first call to
     dlinfo() is incorrect (too small).  The second call to
     dlinfo() uses the incorrect size to allocate memory on the
     stack and internally uses memcpy() to copy the search paths
     into the allocated space.  The length of the search path is
     much larger than the buffer allocated on the stack, so
     memcpy() overwrites some of the information saved on the
     stack; specifically, it overwrites the "basename" parameter.

     collect crashes right after the second call to dlinfo().

     The search paths are used to locate the shared libraries.
     dlinfo() builds the search paths from the paths assigned to
     the LD_LIBRARY_PATH environment variable and the standard
     library paths.  The standard library paths consist of /lib
     and /usr/lib, and are always included in the search paths
     by dlinfo() even if the LD_LIBRARY_PATH environment
     variable is not defined.  Therefore dls_cnt is at least 2
     (/lib and /usr/lib) and dlinfo() will never set dls_cnt to
     zero.  dls_cnt is the count of the potential paths for
     searching the shared libraries.

     So we need to increase the buffer size before the second
     call to dlinfo().  There are a number of ways to increase
     the buffer size, but none of them can calculate it
     precisely.  Some users on the web have suggested multiplying
     MAXPATHLEN by dls_cnt for the buffer size; MAXPATHLEN is
     1024 bytes, which in my opinion is too much.  So I have
     decided to multiply dls_size by dls_cnt for the buffer size,
     since dls_size is much smaller than 1024 bytes.

     I have already confirmed with our user that the workaround
     works with his real application.  Additionally, the
     dlopen_searchpath() function is called only by the
     libcollector init() function when the experiment is started.
     Therefore, allocating some extra bytes on the stack local to
     this routine is harmless.
   */
  info = alloca (_info.dls_size * _info.dls_cnt);
  info->dls_size = _info.dls_size;
  info->dls_cnt = _info.dls_cnt;

  /* obtain search path information */
  dlinfo (caller_hndl, RTLD_DI_SERINFO, (void *) info);
  path = &info->dls_serpath[0];
  char pathname[MAXPATHLEN];
  for (unsigned int cnt = 1; cnt <= info->dls_cnt; cnt++, path++)
    {
      __collector_strlcpy (pathname, path->dls_name, sizeof (pathname));
      __collector_strlcat (pathname, "/", sizeof (pathname));
      __collector_strlcat (pathname, basename, sizeof (pathname));
      void *ret = NULL;
#if (ARCH(Intel) && WSIZE(32)) || ARCH(SPARC)
      ret = (real_dlopen) (pathname, mode);
#else
      ret = CALL_REAL (dlopen)(pathname, mode);
#endif
      TprintfT (DBG_LT2, "try %d/%d: %s = %p\n", cnt, info->dls_cnt, pathname, ret);
      if (ret)
	return ret; // success!
    }
#endif
  return NULL;
}
static void
resolve_mapname (MapInfo *map, char *name)
{
  if (name == NULL || *name == '\0')
    {
      if (map->mflags & MA_STACK)
	map->filename = "<" SP_MAP_STACK ">";
      else if (map->mflags & MA_BREAK)
	map->filename = "<" SP_MAP_HEAP ">";
      else if (map->mflags & MA_ISM)
	map->filename = "<" SP_MAP_SHMEM ">";
      return;
    }

  /* Look for the name among the known name maps */
  NameInfo *np;
  for (np = namemaps; np; np = np->next)
    if (__collector_strcmp (np->mapname, name) == 0)
      break;

  if (np == NULL)
    {
      const char *fname = name;
      /* Create and link a new name map */
      size_t fnamelen = __collector_strlen (fname) + 1;
      np = (NameInfo*) __collector_allocCSize (__collector_heap, sizeof (NameInfo) + fnamelen, 1);
      if (np == NULL) // We could not get memory
	return;
      np->mapname = np->filename;
      __collector_strlcpy (np->filename, fname, fnamelen);
      np->next = namemaps;
      namemaps = np;
    }
  map->mapname = np->mapname;
  map->filename = np->filename;
  if (map->filename[0] == (char) 0)
    map->filename = map->mapname;
  TprintfT (DBG_LT2, "resolve_mapname: %s resolved to %s\n", map->mapname, map->filename);
}
static unsigned long
str2ulong (char **ss)
{
  char *s = *ss;
  unsigned long val = 0UL;
  const int base = 16;
  for (;;)
    {
      char c = *s++;
      if (c >= '0' && c <= '9')
	val = val * base + (c - '0');
      else if (c >= 'a' && c <= 'f')
	val = val * base + (c - 'a') + 10;
      else if (c >= 'A' && c <= 'F')
	val = val * base + (c - 'A') + 10;
      else
	break;
    }
  *ss = s - 1;
  return val;
}
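/* For reference, update_map_segments() below feeds str2ulong() the hex fields
   of /proc/self/maps lines.  A typical line (example values only) looks like:

     7f0102030000-7f0102234000 r-xp 00000000 08:02 131138  /usr/lib64/libc.so.6

   i.e. start and end address, permission flags, file offset, device
   major:minor, inode, and the mapped file name.  */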
static void
update_map_segments (hrtime_t hrt, int resolve)
{
  if (__collector_mutex_trylock (&map_lock))
    {
      TprintfT (0, "WARNING: update_map_segments(resolve=%d) BUSY\n", resolve);
      return;
    }
  TprintfT (DBG_LT2, "\n");
  TprintfT (DBG_LT2, "begin update_map_segments(hrt, %d)\n", resolve);

  // Note: there is similar code to read /proc/$PID/map[s] in
  // perfan/er_kernel/src/KSubExp.cc KSubExp::write_subexpt_map()
  const char* proc_map = "/proc/self/maps";
  size_t bufsz = maptext_sz;
  size_t filesz = 0;
  int map_fd = CALL_UTIL (open)(proc_map, O_RDONLY);
  for (;;)
    {
      bufsz *= 2;
      maptext = __collector_reallocVSize (__collector_heap, maptext, bufsz);
      TprintfT (DBG_LT2, "  update_map_segments: Loop for bufsize=%ld\n",
		(long) bufsz);
      for (;;)
	{
	  int n = CALL_UTIL (read)(map_fd, maptext + filesz, bufsz - filesz);
	  TprintfT (DBG_LT2, "  update_map_segments: __collector_read(bufp=%p nbyte=%ld)=%d\n",
		    maptext + filesz, (long) ( bufsz - filesz), n);
	  if (n < 0)
	    {
	      TprintfT (0, "ERROR: update_map_segments: read(maps): errno=%d\n", errno);
	      (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
					    SP_JCMD_CERROR, COL_ERROR_MAPREAD, errno, proc_map);
	      CALL_UTIL (close)(map_fd);
	      __collector_mutex_unlock (&map_lock);
	      return;
	    }
	  else if (n == 0)
	    break;
	  filesz += n;
	}
      if (filesz >= bufsz) /* Buffer too small */
	continue;
      break;
    }
  CALL_UTIL (close)(map_fd);
  int mapcache_entries = 0;
  char *str, *str1;
  for (str = maptext;; str = str1)
    {
      /* Find the end of the current line; the maps file is line oriented */
      for (str1 = str; str1 - maptext < filesz; str1++)
	if (*str1 == '\n')
	  break;
      if (str1 - maptext >= filesz)
	break;
      *str1++ = '\0';

      mapcache_entries++;
      mapcache = __collector_reallocVSize (__collector_heap, mapcache,
					   sizeof (prmap_t) * mapcache_entries);
      prmap_t *map = ((prmap_t*) mapcache) + (mapcache_entries - 1);
      map->pr_vaddr = str2ulong (&str);
      str++;                            /* skip the '-' */
      unsigned long eaddr = str2ulong (&str);
      str++;                            /* skip the ' ' */
      map->pr_size = eaddr - map->pr_vaddr;
      map->pr_mflags = 0;
      map->pr_mflags += (*str++ == 'r' ? PROT_READ : 0);
      map->pr_mflags += (*str++ == 'w' ? PROT_WRITE : 0);
      map->pr_mflags += (*str++ == 'x' ? PROT_EXEC : 0);
      map->pr_mflags += (*str++ == 's' ? MA_SHARED : 0);
      str++;                            /* skip the ' ' */
      map->pr_offset = str2ulong (&str);
      str++;                            /* skip the ' ' */
      map->pr_dev = str2ulong (&str) * 0x100;
      str++;                            /* skip the ':' */
      map->pr_dev += str2ulong (&str);
      str++;                            /* skip the ' ' */
      map->pr_ino = str2ulong (&str);
      if (map->pr_dev == 0)
	map->pr_mflags |= MA_ANON;
      while (*str == ' ')
	str++;
      map->pr_mapname = str;
      map->pr_pagesize = 4096;
    }
  /* Compare the two maps and record all differences */
  int nidx = 0;
  MapInfo *prev = &mmaps;
  MapInfo *oldp = mmaps.next;
  for (;;)
    {
      prmap_t *newp = nidx < mapcache_entries ?
	      (prmap_t*) mapcache + nidx : NULL;
      if (oldp == NULL && newp == NULL)
	break;

      /* If the two maps are equal proceed to the next pair */
      if (oldp != NULL && newp != NULL &&
	  oldp->vaddr == newp->pr_vaddr &&
	  oldp->size == newp->pr_size &&
	  __collector_strcmp (oldp->mapname, newp->pr_mapname) == 0)
	{
	  prev = oldp;
	  oldp = oldp->next;
	  nidx++;
	  continue;
	}

      /* Check if we need to unload the old map first */
      if (newp == NULL || (oldp && oldp->vaddr <= newp->pr_vaddr))
	{
	  /* Don't record MA_ANON maps except MA_STACK and MA_BREAK */
	  if ((!(oldp->mflags & MA_ANON) || (oldp->mflags & (MA_STACK | MA_BREAK))))
	    record_segment_unmap (hrt, oldp->vaddr);
	  /* Remove and free map */
	  MapInfo *tmp = oldp;
	  prev->next = oldp->next;
	  oldp = oldp->next;
	  __collector_freeCSize (__collector_heap, tmp, sizeof (*tmp));
	}
      else
	{
	  /* Insert and record the new map */
	  MapInfo *map = (MapInfo*) __collector_allocCSize (__collector_heap, sizeof (MapInfo), 1);
	  if (map == NULL)
	    {
	      __collector_mutex_unlock (&map_lock);
	      return;
	    }
	  map->vaddr = newp->pr_vaddr;
	  map->size = newp->pr_size;
	  map->offset = newp->pr_offset;
	  map->mflags = newp->pr_mflags;
	  map->pagesize = newp->pr_pagesize;
	  resolve_mapname (map, newp->pr_mapname);

	  /* Link the new map into the list */
	  map->next = prev->next;
	  prev->next = map;
	  prev = map;
	  nidx++;

	  /* Don't record MA_ANON maps except MA_STACK and MA_BREAK */
	  if (!(newp->pr_mflags & MA_ANON) || (newp->pr_mflags & (MA_STACK | MA_BREAK)))
	    {
	      unsigned checksum = checksum_mapname (map);
	      record_segment_map (hrt, map->vaddr, map->size,
				  map->pagesize, map->mflags,
				  map->offset, checksum, map->filename);
	    }
	}
    }
  TprintfT (DBG_LT2, "update_map_segments: done\n\n");
  __collector_mutex_unlock (&map_lock);
} /* update_map_segments */
/*
 * Map addr to a segment.  Cope with split segments.
 */
static int
__collector_check_segment_internal (unsigned long addr, unsigned long *base,
				    unsigned long *end, int maxnretries, int MA_FLAGS)
{
  int number_of_tries = 0;
retry:
  {
    MapInfo *mp;
    unsigned long curbase = 0;
    unsigned long curfoff = 0;
    unsigned long cursize = 0;
    for (mp = mmaps.next; mp; mp = mp->next)
      {
	if (curbase + cursize == mp->vaddr &&
	    curfoff + cursize == mp->offset &&
	    ((mp->mflags & MA_FLAGS) == MA_FLAGS
	     || __collector_strncmp (mp->mapname, "[vdso]", 6) == 0
	     || __collector_strncmp (mp->mapname, "[vsyscall]", 10) == 0))
	  cursize = mp->vaddr + mp->size - curbase;
	else if (addr < mp->vaddr)
	  break;
	else if ((mp->mflags & MA_FLAGS) != MA_FLAGS
		 && __collector_strncmp (mp->mapname, "[vdso]", 6)
		 && __collector_strncmp (mp->mapname, "[vsyscall]", 10))
	  {
	    curbase = 0;
	    curfoff = 0;
	    cursize = 0;
	  }
	else
	  {
	    curbase = mp->vaddr;
	    curfoff = mp->offset;
	    cursize = mp->size;
	  }
      }
    if (addr >= curbase && addr < curbase + cursize)
      {
	*base = curbase;
	*end = curbase + cursize;
	return 1;
      }
  }
  /*
   * 21275311 Unwind failure in native stack for java application running on jdk8 on x86
   *
   * On JDK8, we've observed cases where Java-compiled methods end up
   * in virtual address segments that were "dead zones" (mflags&PROT_READ==0) at
   * the time of the last update_map_segments() but are now "live".  So if we
   * fail to find a segment, let's call update_map_segments and then retry.
   */
  if (number_of_tries < maxnretries)
    {
      number_of_tries++;
      __collector_ext_update_map_segments ();
      goto retry;
    }
  *base = 0;
  *end = 0;
  return 0;
}
/*
 * Check if an address belongs to a readable and executable segment.
 * @return 1 - yes, 0 - no
 */
int
__collector_check_segment (unsigned long addr, unsigned long *base,
			   unsigned long *end, int maxnretries)
{
  int MA_FLAGS = PROT_READ | PROT_EXEC;
  int res = __collector_check_segment_internal (addr, base, end, maxnretries, MA_FLAGS);
  return res;
}

/*
 * Check if an address belongs to a readable segment.
 * @return 1 - yes, 0 - no
 */
int
__collector_check_readable_segment (unsigned long addr, unsigned long *base,
				    unsigned long *end, int maxnretries)
{
  int MA_FLAGS = PROT_READ;
  int res = __collector_check_segment_internal (addr, base, end, maxnretries, MA_FLAGS);
  return res;
}
static ELF_AUX *auxv = NULL;

static void
process_vsyscall_page ()
{
  TprintfT (DBG_LT2, "process_vsyscall_page()\n");
  if (ndyn != 0)
    {
      /* We've done this one in this process, and cached the results;
	 use the cached results.  */
      for (int i = 0; i < ndyn; i++)
	{
	  append_segment_record ("<event kind=\"map\" object=\"dynfunc\" name=\"%s\" "
				 "vaddr=\"0x%016lX\" size=\"%u\" funcname=\"%s\" />\n",
				 dynname[i], dynvaddr[i], dynsize[i], dynfuncname[i]);
	  TprintfT (DBG_LT2, "process_vsyscall_page: append_segment_record map dynfunc='%s' vaddr=0x%016lX size=%ld funcname='%s' -- from cache\n",
		    dynname[i], (unsigned long) dynvaddr[i],
		    (long) dynsize[i], dynfuncname[i]);
	}
    }
  if (nvsysfuncs != 0)
    {
      /* We've done this one in this process, and cached the results;
	 use the cached results.  */
      hrtime_t hrt = GETRELTIME ();
      for (int i = 0; i < nvsysfuncs; i++)
	{
	  append_segment_record ("<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
				 "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
				 (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
				 (unsigned long) sysfuncvaddr[i], (unsigned) sysfuncsize[i], sysfuncname[i]);
	  TprintfT (DBG_LT2, "process_vsyscall_page: append_segment_record map function='%s' vaddr=0x%016lX size=%ld -- from cache\n",
		    sysfuncname[i], (unsigned long) sysfuncvaddr[i], (long) sysfuncsize[i]);
	}
    }
  if (ndyn + nvsysfuncs != 0)
    return;

  /* After fork we can't rely on environ as it might have
   * been moved by putenv().  Use the pointer saved by the parent.  */
  if (auxv == NULL)
    {
      char **envp = (char**) environ;
      /* skip past the environment strings; the aux vector follows them */
      while (*envp++ != NULL)
	;
      auxv = (ELF_AUX*) envp;
    }
  TprintfT (DBG_LT2, "process_vsyscall_page, auxv = %p\n", auxv);

  ELF_AUX *ap;
  for (ap = auxv; ap->a_type != AT_NULL; ap++)
    TprintfT (DBG_LT2, "process_vsyscall_page: ELF_AUX: "
	      " a_type = 0x%016llx %10lld "
	      " a_un.a_val = 0x%016llx %10lld\n",
	      (long long) ap->a_type, (long long) ap->a_type,
	      (long long) ap->a_un.a_val, (long long) ap->a_un.a_val);

  // find the first ELF_AUX of type AT_SYSINFO_EHDR
  ELF_EHDR *ehdr = NULL;
  for (ap = auxv; ap->a_type != AT_NULL; ap++)
    if (ap->a_type == AT_SYSINFO_EHDR)
      {
	// newer Linuxes do not have a_ptr field, they just have a_val
	ehdr = (ELF_EHDR*)(intptr_t) ap->a_un.a_val;
	break;
      }

  char *mapName = "SYSINFO_EHDR";
  MapInfo *mp;
  for (mp = mmaps.next; mp; mp = mp->next)
    if ((unsigned long) ehdr == mp->vaddr)
      {
	mp->mflags |= PROT_EXEC;
	if (mp->mapname && mp->mapname[0])
	  mapName = mp->mapname;
	break;
      }
  // Find the dynsym section and record all symbols
  char *base = (char*) ehdr;
  ELF_SHDR *shdr = (ELF_SHDR*) (base + ehdr->e_shoff);
  int i;
  TprintfT (DBG_LT2, "process_vsyscall_page: ehdr: EI_CLASS=%lld EI_DATA=%lld EI_OSABI=%lld e_type=%lld e_machine=%lld e_version=%lld\n"
	    " e_entry =0x%016llx %10lld e_phoff =0x%016llx %10lld\n"
	    " e_shoff =0x%016llx %10lld e_flags =0x%016llx %10lld\n"
	    " e_ehsize =0x%016llx %10lld e_phentsize =0x%016llx %10lld\n"
	    " e_phnum =0x%016llx %10lld e_shentsize =0x%016llx %10lld\n"
	    " e_shnum =0x%016llx %10lld e_shstrndx =0x%016llx %10lld\n",
	    (long long) ehdr->e_ident[EI_CLASS], (long long) ehdr->e_ident[EI_DATA], (long long) ehdr->e_ident[EI_OSABI],
	    (long long) ehdr->e_type, (long long) ehdr->e_machine, (long long) ehdr->e_version,
	    (long long) ehdr->e_entry, (long long) ehdr->e_entry,
	    (long long) ehdr->e_phoff, (long long) ehdr->e_phoff,
	    (long long) ehdr->e_shoff, (long long) ehdr->e_shoff,
	    (long long) ehdr->e_flags, (long long) ehdr->e_flags,
	    (long long) ehdr->e_ehsize, (long long) ehdr->e_ehsize,
	    (long long) ehdr->e_phentsize, (long long) ehdr->e_phentsize,
	    (long long) ehdr->e_phnum, (long long) ehdr->e_phnum,
	    (long long) ehdr->e_shentsize, (long long) ehdr->e_shentsize,
	    (long long) ehdr->e_shnum, (long long) ehdr->e_shnum,
	    (long long) ehdr->e_shstrndx, (long long) ehdr->e_shstrndx);
  for (i = 1; i < ehdr->e_shnum; i++)
    TprintfT (DBG_LT2, "process_vsyscall_page: SECTION=%d sh_name=%lld '%s'\n"
	      " sh_type =0x%016llx %10lld\n"
	      " sh_flags =0x%016llx %10lld\n"
	      " sh_addr =0x%016llx %10lld\n"
	      " sh_offset =0x%016llx %10lld\n"
	      " sh_size =0x%016llx %10lld\n"
	      " sh_link =0x%016llx %10lld\n"
	      " sh_info =0x%016llx %10lld\n"
	      " sh_addralign =0x%016llx %10lld\n"
	      " sh_entsize =0x%016llx %10lld\n",
	      i, (long long) shdr[i].sh_name, base + shdr[ehdr->e_shstrndx].sh_offset + shdr[i].sh_name,
	      (long long) shdr[i].sh_type, (long long) shdr[i].sh_type,
	      (long long) shdr[i].sh_flags, (long long) shdr[i].sh_flags,
	      (long long) shdr[i].sh_addr, (long long) shdr[i].sh_addr,
	      (long long) shdr[i].sh_offset, (long long) shdr[i].sh_offset,
	      (long long) shdr[i].sh_size, (long long) shdr[i].sh_size,
	      (long long) shdr[i].sh_link, (long long) shdr[i].sh_link,
	      (long long) shdr[i].sh_info, (long long) shdr[i].sh_info,
	      (long long) shdr[i].sh_addralign, (long long) shdr[i].sh_addralign,
	      (long long) shdr[i].sh_entsize, (long long) shdr[i].sh_entsize);
  int dynSec = -1;
  for (i = 1; i < ehdr->e_shnum; i++)
    if (shdr[i].sh_type == SHT_DYNSYM)
      {
	dynSec = i;
	break;
      }
  if (dynSec < 0)
    return;

  char *symbase = base + shdr[shdr[dynSec].sh_link].sh_offset;
  ELF_SYM *symbols = (ELF_SYM*) (base + shdr[dynSec].sh_offset);
  int nextSec = 0;
  int n = shdr[dynSec].sh_size / shdr[dynSec].sh_entsize;
  for (i = 0; i < n; i++)
    {
      ELF_SYM *sym = symbols + i;
      TprintfT (DBG_LT2, "process_vsyscall_page: symbol=%d st_name=%lld '%s'\n"
		" st_size = 0x%016llx %10lld\n"
		" st_value = 0x%016llx %10lld\n"
		" st_shndx = 0x%016llx %10lld\n"
		" st_info = 0x%016llx %10lld\n",
		i, (long long) sym->st_name, symbase + sym->st_name,
		(long long) sym->st_size, (long long) sym->st_size,
		(long long) sym->st_value, (long long) sym->st_value,
		(long long) sym->st_shndx, (long long) sym->st_shndx,
		(long long) sym->st_info, (long long) sym->st_info);
      if (sym->st_shndx <= 0 || sym->st_size <= 0 ||
	  ELF_ST_BIND (sym->st_info) != STB_GLOBAL || ELF_ST_TYPE (sym->st_info) != STT_FUNC)
	continue;
      if (nextSec == 0)
	nextSec = sym->st_shndx;
      else if (nextSec > sym->st_shndx)
	nextSec = sym->st_shndx;
    }
  /* Process the sections that contain global function symbols,
     lowest section index first.  */
  while (nextSec != 0)
    {
      int curSec = nextSec;
      char *bgn = base + shdr[curSec].sh_offset;
      char *end = bgn + shdr[curSec].sh_size;
      for (i = 0; i < n; i++)
	{
	  ELF_SYM *sym = symbols + i;
	  if (sym->st_shndx <= 0 || sym->st_size <= 0 ||
	      ELF_ST_BIND (sym->st_info) != STB_GLOBAL || ELF_ST_TYPE (sym->st_info) != STT_FUNC)
	    continue;
	  if (sym->st_shndx > curSec)
	    {
	      if (nextSec == curSec)
		nextSec = sym->st_shndx;
	      else if (nextSec > sym->st_shndx)
		nextSec = sym->st_shndx;
	      nextSec = sym->st_shndx;
	      continue;
	    }
	  if (sym->st_shndx != curSec)
	    continue;
	  long long st_delta = (sym->st_value >= shdr[sym->st_shndx].sh_addr) ?
		  (sym->st_value - shdr[sym->st_shndx].sh_addr) : -1;
	  char *st_value = bgn + st_delta;
	  if (st_delta >= 0 && st_value + sym->st_size <= end)
	    {
	      append_segment_record ("<event kind=\"map\" object=\"dynfunc\" name=\"%s\" "
				     "vaddr=\"0x%016lX\" size=\"%u\" funcname=\"%s\" />\n",
				     mapName, (void*) st_value, sym->st_size, symbase + sym->st_name);

	      TprintfT (DBG_LT2, "process_vsyscall_page: append_segment_record map dynfunc='%s' vaddr=%016lX size=%ld funcname='%s'\n",
			mapName, (unsigned long) st_value,
			(long) sym->st_size, symbase + sym->st_name);

	      /* now cache this for a subsequent experiment */
	      if (ndyn >= MAXDYN)
		__collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXDYN=%d</event>\n",
				       SP_JCMD_CERROR, COL_ERROR_MAPCACHE, MAXDYN);
	      else
		{
		  dynname[ndyn] = CALL_UTIL (libc_strdup)(mapName);
		  dynvaddr[ndyn] = (void *) st_value;
		  dynsize[ndyn] = (unsigned) sym->st_size;
		  dynfuncname[ndyn] = CALL_UTIL (libc_strdup)(symbase + sym->st_name);
		  TprintfT (DBG_LT2, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld '%s'\n",
			    ndyn, dynname[ndyn], (unsigned long) dynvaddr[ndyn],
			    (long) dynsize[ndyn], dynfuncname[ndyn]);
		  ndyn++;
		}
	    }
	}
      __collector_int_func_load (DFUNC_KERNEL, mapName, NULL,
				 (void*) (base + shdr[curSec].sh_offset), shdr[curSec].sh_size, 0, NULL);

      /* now cache this function for a subsequent experiment */
      if (nvsysfuncs >= MAXVSYSFUNCS)
	__collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXVSYSFUNCS=%d</event>\n",
			       SP_JCMD_CERROR, COL_ERROR_MAPCACHE, MAXVSYSFUNCS);
      else
	{
	  sysfuncname[nvsysfuncs] = CALL_UTIL (libc_strdup)(mapName);
	  sysfuncvaddr[nvsysfuncs] = (unsigned long) (base + shdr[curSec].sh_offset);
	  sysfuncsize[nvsysfuncs] = (unsigned long) (shdr[curSec].sh_size);
	  TprintfT (DBG_LT2, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld\n",
		    nvsysfuncs, sysfuncname[nvsysfuncs],
		    (unsigned long) sysfuncvaddr[nvsysfuncs],
		    (long) sysfuncsize[nvsysfuncs]);
	  nvsysfuncs++;
	}
      TprintfT (DBG_LT2, "process_vsyscall_page: collector_int_func_load='%s' vaddr=0x%016lX size=%ld\n",
		mapName, (unsigned long) (base + shdr[curSec].sh_offset),
		(long) shdr[curSec].sh_size);
      if (curSec == nextSec)
	break;
    }

#if WSIZE(32)
  unsigned long vsysaddr = (unsigned long) 0xffffe000;
#else
  unsigned long vsysaddr = (unsigned long) 0xffffffffff600000;
#endif
  // Make sure the vsyscall map has PROT_EXEC
  for (mp = mmaps.next; mp; mp = mp->next)
    {
      TprintfT (DBG_LT2, "MapInfo: vaddr=0x%016llx [size=%lld] mflags=0x%llx offset=%lld pagesize=%lld\n"
		" mapname='%s' filename='%s'\n",
		(unsigned long long) mp->vaddr, (long long) mp->size,
		(long long) mp->mflags, (long long) mp->offset, (long long) mp->pagesize,
		mp->mapname ? mp->mapname : "NULL",
		mp->filename ? mp->filename : "NULL");
      if (vsysaddr == mp->vaddr)
	mp->mflags |= PROT_EXEC;
      if ((unsigned long) ehdr == (unsigned long) mp->vaddr)
	continue;
      if (__collector_strncmp (mp->mapname, "[vdso]", 6) == 0
	  || __collector_strncmp (mp->mapname, "[vsyscall]", 10) == 0)
	{
	  /*
	   * On rubbia ( 2.6.9-5.ELsmp #1 SMP 32-bit ) access to ehdr causes SEGV.
	   * There doesn't seem to be a way to reliably determine the actual presence
	   * of the page: even when /proc reports it's there it can't be accessed.
	   * We will have to put up with <Unknown> on some Linuxes until this is resolved.
	   __collector_int_func_load(DFUNC_KERNEL, mp->mapname, NULL, (void*) mp->vaddr, mp->size, 0, NULL);
	   */
	  hrtime_t hrt = GETRELTIME ();
	  append_segment_record (
		  "<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
		  "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
		  (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
		  (unsigned long) mp->vaddr, (unsigned) mp->size, mp->mapname);
	  TprintfT (DBG_LT2, "process_vsyscall_page: append_segment_record map function = %s, vaddr = 0x%016lX, size = %u\n",
		    mp->mapname, (unsigned long) mp->vaddr, (unsigned) mp->size);

	  /* now cache this function for a subsequent experiment */
	  if (nvsysfuncs >= MAXVSYSFUNCS)
	    __collector_log_write ("<event kind=\"%s\" id=\"%d\">MAXVSYSFUNCS=%d</event>\n",
				   SP_JCMD_CERROR, COL_ERROR_MAPCACHE, MAXVSYSFUNCS);
	  else
	    {
	      sysfuncname[nvsysfuncs] = CALL_UTIL (libc_strdup)(mp->mapname);
	      sysfuncvaddr[nvsysfuncs] = mp->vaddr;
	      sysfuncsize[nvsysfuncs] = (unsigned long) mp->size;
	      TprintfT (DBG_LT2, "process_vsyscall_page: cached entry %d map function='%s' vaddr=0x%016lX size=%ld\n",
			nvsysfuncs, sysfuncname[nvsysfuncs],
			(unsigned long) sysfuncvaddr[nvsysfuncs],
			(long) sysfuncsize[nvsysfuncs]);
	      nvsysfuncs++;
	    }
	}
    }
}
/*
 * collector API for dynamic functions
 */
void collector_func_load () __attribute__ ((weak, alias ("__collector_func_load")));
void
__collector_func_load (char *name, char *alias, char *sourcename,
		       void *vaddr, int size, int lntsize, DT_lineno *lntable)
{
  __collector_int_func_load (DFUNC_API, name, sourcename,
			     vaddr, size, lntsize, lntable);
}

void collector_func_unload () __attribute__ ((weak, alias ("__collector_func_unload")));
void
__collector_func_unload (void *vaddr)
{
  __collector_int_func_unload (DFUNC_API, vaddr);
}
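/* Illustrative (hypothetical) use of the API above by a JIT or other runtime
   that emits code at run time; "jit_code", "jit_size" and the function name
   are made-up values, not part of this file:

     collector_func_load ("my_jitted_function", NULL, NULL,
			  jit_code, jit_size, 0, NULL);

   The call records the code range in map.xml and writes a copy of the
   generated code to the dyntext file, so the analyzer can later attribute
   samples that land in that range to the named function.  */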
/* routines for handling dynamic functions */
static void
rwrite (int fd, void *buf, size_t nbyte)
{
  size_t left = nbyte;
  ssize_t res;
  char *ptr = (char*) buf;
  while (left > 0)
    {
      res = CALL_UTIL (write)(fd, ptr, left);
      if (res < 0)
	{
	  TprintfT (0, "ERROR: rwrite(%s) failed: errno=%d\n", dyntext_fname, errno);
	  (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
					SP_JCMD_CERROR, COL_ERROR_DYNWRITE, errno, dyntext_fname);
	  return;
	}
      left -= res;
      ptr += res;
    }
}
void
__collector_int_func_load (dfunc_mode_t mode, char *name, char *sourcename,
			   void *vaddr, int size, int lntsize, DT_lineno *lntable)
{
  char name_buf[32];
  static char pad[16];
  int padn;
  int slen = 0;
  void *code = vaddr;
  hrtime_t hrt = GETRELTIME ();

  if (name == NULL)
    {
      /* generate a name based on vaddr */
      CALL_UTIL (snprintf)(name_buf, sizeof (name_buf), "0x%lx", (unsigned long) vaddr);
      name = name_buf;
    }

  if (mode != DFUNC_JAVA)
    append_segment_record ("<event kind=\"map\" object=\"function\" tstamp=\"%u.%09u\" "
			   "vaddr=\"0x%016lX\" size=\"%u\" name=\"%s\" />\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
			   (unsigned long) vaddr, (unsigned) size, name);
  else
    append_segment_record ("<event kind=\"map\" object=\"jcm\" tstamp=\"%u.%09u\" "
			   "vaddr=\"0x%016lX\" size=\"%u\" methodId=\"%s\" />\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC),
			   (unsigned long) vaddr, (unsigned) size, name);

  /* 21275311 Unwind failure in native stack for java application running on jdk8 on x86
   *
   * Check that:
   * - the function starts in a known segment (base1 != 0)
   * - the function ends in the same segment (base1==base2 && end1==end2)
   * If not, then call update_map_segments().
   */
  unsigned long base1, end1, base2, end2;
  __collector_check_segment ((unsigned long) vaddr, &base1, &end1, 0);
  __collector_check_segment (((unsigned long) vaddr)+((unsigned long) size), &base2, &end2, 0);
  if (base1 == 0 || base1 != base2 || end1 != end2)
    __collector_ext_update_map_segments ();

  /* Write a copy of actual code to the "dyntext" file */
  dt_hdr.type = DT_HEADER;
  dt_hdr.size = sizeof (dt_hdr);
  unsigned long t = (unsigned long) vaddr; /* to suppress a warning from gcc */
  dt_hdr.vaddr = (uint64_t) t;

  dt_code.type = DT_CODE;
  if (vaddr != NULL && size > 0)
    {
      dt_code.size = sizeof (dt_code) + ((size + 0xf) & ~0xf);
      if (mode == DFUNC_KERNEL)
	{
	  /* Some Linuxes don't accept vaddrs from the vsyscall
	   * page in write().  Make a copy.
	   */
	  code = alloca (size);
	  __collector_memcpy (code, vaddr, size);
	}
    }

  dt_src.type = DT_SRCFILE;
  if (sourcename)
    {
      slen = CALL_UTIL (strlen)(sourcename) + 1;
      dt_src.size = slen ? sizeof (dt_src) + ((slen + 0xf) & ~0xf) : 0;
    }

  dt_ltbl.type = DT_LTABLE;
  if (lntable != NULL && lntsize > 0)
    dt_ltbl.size = sizeof (dt_ltbl) + lntsize * sizeof (DT_lineno);

  int fd = CALL_UTIL (open)(dyntext_fname, O_RDWR | O_APPEND);
  if (fd < 0)
    {
      TprintfT (0, "ERROR: __collector_int_func_load: open(%s) failed: errno=%d\n",
		dyntext_fname, errno);
      (void) __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
				    SP_JCMD_CERROR, COL_ERROR_DYNOPEN, errno, dyntext_fname);
      return;
    }

  /* Lock the whole file */
  __collector_mutex_lock (&dyntext_lock);
  rwrite (fd, &dt_hdr, sizeof (dt_hdr));
  if (vaddr != NULL && size > 0)
    {
      padn = dt_code.size - sizeof (dt_code) - size;
      rwrite (fd, &dt_code, sizeof (dt_code));
      rwrite (fd, code, size);
      rwrite (fd, &pad, padn);
    }
  if (sourcename && slen > 0)
    {
      padn = dt_src.size - sizeof (dt_src) - slen;
      rwrite (fd, &dt_src, sizeof (dt_src));
      rwrite (fd, sourcename, slen);
      rwrite (fd, &pad, padn);
    }
  if (lntable != NULL && lntsize > 0)
    {
      rwrite (fd, &dt_ltbl, sizeof (dt_ltbl));
      rwrite (fd, lntable, dt_ltbl.size - sizeof (dt_ltbl));
    }

  /* Unlock the file */
  __collector_mutex_unlock( &dyntext_lock);
  CALL_UTIL(close( fd ) );
}
void
__collector_int_func_unload (dfunc_mode_t mode, void *vaddr)
{
  hrtime_t hrt = GETRELTIME ();
  if (mode == DFUNC_API)
    append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" vaddr=\"0x%016lX\"/>\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC), (unsigned long) vaddr);
  else if (mode == DFUNC_JAVA)
    /* note that the "vaddr" is really a method id, not an address */
    append_segment_record ("<event kind=\"unmap\" tstamp=\"%u.%09u\" methodId=\"0x%016lX\"/>\n",
			   (unsigned) (hrt / NANOSEC), (unsigned) (hrt % NANOSEC), (unsigned long) vaddr);
}
/*
 * int init_mmap_intf()
 *      Set up interposition (if not already done).
 */
static int
init_mmap_intf ()
{
  if (__collector_dlsym_guard)
    return 1;
  void *dlflag = RTLD_NEXT;
  __real_mmap = (void*(*)(void* addr, size_t len, int prot, int flags,
			  int fildes, off_t off))dlsym (RTLD_NEXT, SYS_MMAP_NAME);
  if (__real_mmap == NULL)
    {
      /* We are probably dlopened after libthread/libc,
       * try to search in the previously loaded objects
       */
      __real_mmap = (void*(*)(void* addr, size_t len, int prot, int flags,
			      int fildes, off_t off))dlsym (RTLD_DEFAULT, SYS_MMAP_NAME);
      if (__real_mmap == NULL)
	{
	  TprintfT (0, "ERROR: collector real mmap not found\n");
	  return 1;
	}
      TprintfT (DBG_LT2, "collector real mmap found with RTLD_DEFAULT\n");
      dlflag = RTLD_DEFAULT;
    }
  else
    TprintfT (DBG_LT2, "collector real mmap found with RTLD_NEXT\n");
  TprintfT (DBG_LT2, "init_mmap_intf() @%p __real_mmap\n", __real_mmap);

  __real_mmap64 = (void*(*)(void *, size_t, int, int, int, off64_t))
	  dlsym (dlflag, SYS_MMAP64_NAME);
  TprintfT (DBG_LT2, "init_mmap_intf() @%p __real_mmap64\n", __real_mmap64);
  __real_munmap = (int(*)(void *, size_t)) dlsym (dlflag, SYS_MUNMAP_NAME);
  TprintfT (DBG_LT2, "init_mmap_intf() @%p __real_munmap\n", __real_munmap);

  // dlopen/dlmopen/dlclose are in libdl.so
  __real_dlopen = (void*(*)(const char *, int))
	  dlvsym (dlflag, SYS_DLOPEN_NAME, SYS_DLOPEN_VERSION);
  TprintfT (DBG_LT2, "init_mmap_intf() [%s] @%p __real_dlopen\n",
	    SYS_DLOPEN_VERSION, __real_dlopen);
#if (ARCH(Intel) && WSIZE(32)) || ARCH(SPARC)
  __real_dlopen_2_1 = __real_dlopen;
  __real_dlopen_2_0 = (void*(*)(const char *, int))
	  dlvsym (dlflag, SYS_DLOPEN_NAME, "GLIBC_2.0");
#endif
  __real_dlclose = (int(*)(void* handle))dlsym (dlflag, SYS_DLCLOSE_NAME);
  TprintfT (DBG_LT2, "init_mmap_intf() @%p __real_dlclose\n", __real_dlclose);
  TprintfT (DBG_LT2, "init_mmap_intf() done\n");
  return 0;
}
/*------------------------------------------------------------- mmap */
void *
mmap (void *start, size_t length, int prot, int flags, int fd, off_t offset)
{
  int err = 0;
  if (NULL_PTR (mmap))
    err = init_mmap_intf ();
  if (err)
    return MAP_FAILED;

  /* hrtime_t hrt = GETRELTIME(); */
  void *ret = CALL_REAL (mmap)(start, length, prot, flags, fd, offset);
  if (!CHCK_REENTRANCE && (ret != MAP_FAILED) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MMAP_TRACE, length, ret);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.mmap(%p, %ld, %d, %d, %d, 0x%lld) = %p\n",
	    start, (long) length, prot, flags, fd, (long long) offset, ret);
  return ret;
}
/*------------------------------------------------------------- mmap64 */
#if WSIZE(32)       /* mmap64 only defined for non-64-bit */
void *
mmap64 (void *start, size_t length, int prot, int flags, int fd, off64_t offset)
{
  if (NULL_PTR (mmap64))
    init_mmap_intf ();

  /* hrtime_t hrt = GETRELTIME(); */
  void *ret = CALL_REAL (mmap64)(start, length, prot, flags, fd, offset);
  if (!CHCK_REENTRANCE && (ret != MAP_FAILED) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MMAP_TRACE, length, ret);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.mmap64(%p, %ld, %d, %d, %d, 0x%lld) = %p\n",
	    start, (long) length, prot, flags, fd, (long long) offset, ret);
  return ret;
}
#endif /* WSIZE(32) */
/*------------------------------------------------------------- munmap */
int
munmap (void *start, size_t length)
{
  if (NULL_PTR (munmap))
    init_mmap_intf ();

  /* hrtime_t hrt = GETRELTIME(); */
  int rc = CALL_REAL (munmap)(start, length);
  if (!CHCK_REENTRANCE && (rc == 0) && collector_heap_record != NULL)
    {
      PUSH_REENTRANCE;
      /* write a separate record for mmap tracing */
      collector_heap_record (MUNMAP_TRACE, length, start);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "libcollector.munmap(%p, %ld) = %d\n", start, (long) length, rc);
  return rc;
}
/*------------------------------------------------------------- dlopen */
static void *
__collector_dlopen_symver (void*(real_dlopen) (const char *, int),
			   void *caller, const char *pathname, int mode)
{
  const char *real_pathname = pathname;
  char new_pathname[MAXPATHLEN];
  int origin_offset = 0;
  TprintfT (DBG_LT2, "dlopen: pathname=%s, mode=%d\n", pathname ? pathname : "NULL", mode);
  if (pathname && __collector_strStartWith (pathname, "$ORIGIN/") == 0)
    origin_offset = 8;
  else if (pathname && __collector_strStartWith (pathname, "${ORIGIN}/") == 0)
    origin_offset = 10;
  if (origin_offset)
    {
      Dl_info dl_info;
      if (caller && dladdr (caller, &dl_info) != 0)
	{
	  TprintfT (DBG_LT2, "dladdr(%p): %p fname=%s\n",
		    caller, dl_info.dli_fbase, dl_info.dli_fname);
	  new_pathname[0] = '\0';
	  const char *p = __collector_strrchr (dl_info.dli_fname, '/');
	  if (p)
	    __collector_strlcpy (new_pathname, dl_info.dli_fname,
				 (p - dl_info.dli_fname + 2) < MAXPATHLEN ? (p - dl_info.dli_fname + 2) : MAXPATHLEN);
	  __collector_strlcat (new_pathname, pathname + origin_offset, MAXPATHLEN - CALL_UTIL (strlen)(new_pathname));
	  real_pathname = new_pathname;
	}
      else
	TprintfT (0, "ERROR: dladdr(%p): %s\n", caller, dlerror ());
    }
  if (NULL_PTR (dlopen))
    init_mmap_intf ();
  TprintfT (DBG_LT2, "libcollector.dlopen(%s,%d) interposing\n",
	    pathname ? pathname : "", mode);
  void *ret = NULL;

  // set guard for duration of handling dlopen, since want to ensure
  // new mappings are resolved after the actual dlopen has occurred
  PUSH_REENTRANCE;
  hrtime_t hrt = GETRELTIME ();
  if (caller && real_pathname && !__collector_strchr (real_pathname, '/'))
    ret = dlopen_searchpath (real_dlopen, caller, real_pathname, mode);
  if (ret == NULL)
    ret = real_dlopen (real_pathname, mode);
  TprintfT (DBG_LT2, "libcollector -- dlopen(%s) returning %p\n", pathname, ret);

  /* Don't call update if dlopen failed: preserve dlerror() */
  if (ret && (mmap_mode > 0) && !(mode & RTLD_NOLOAD))
    update_map_segments (hrt, 1);
  POP_REENTRANCE;
  TprintfT (DBG_LT2, "libcollector -- dlopen(%s) returning %p\n", pathname, ret);
  return ret;
}
void *
dlopen (const char *pathname, int mode)
{
  if (NULL_PTR (dlopen))
    init_mmap_intf ();
  void* caller = __builtin_return_address (0); // must be called inside dlopen first layer interposition
  return __collector_dlopen_symver (CALL_REAL (dlopen), caller, pathname, mode);
}

#if !defined(__MUSL_LIBC) && ((ARCH(Intel) && WSIZE(32)) || ARCH(SPARC))
// map interposed symbol versions
SYMVER_ATTRIBUTE (__collector_dlopen_2_1, dlopen@GLIBC_2.1)
void *
__collector_dlopen_2_1 (const char *pathname, int mode)
{
  if (NULL_PTR (dlopen_2_1))
    init_mmap_intf ();
  void *caller = __builtin_return_address (0); // must be called inside dlopen first layer interposition
  return __collector_dlopen_symver (CALL_REAL (dlopen_2_1), caller, pathname, mode);
}

SYMVER_ATTRIBUTE (__collector_dlopen_2_0, dlopen@GLIBC_2.0)
void *
__collector_dlopen_2_0 (const char *pathname, int mode)
{
  if (NULL_PTR (dlopen_2_0))
    init_mmap_intf ();
  void* caller = __builtin_return_address (0); // must be called inside dlopen first layer interposition
  return __collector_dlopen_symver (CALL_REAL (dlopen_2_0), caller, pathname, mode);
}
#endif
/*------------------------------------------------------------- dlclose */
int
dlclose (void *handle)
{
  if (NULL_PTR (dlclose))
    init_mmap_intf ();
  TprintfT (DBG_LT2, "__collector_dlclose(%p) entered\n", handle);
  hrtime_t hrt = GETRELTIME ();
  if (!CHCK_REENTRANCE)
    {
      PUSH_REENTRANCE;
      update_map_segments (hrt, 1);
      POP_REENTRANCE;
      hrt = GETRELTIME ();
    }
  int ret = CALL_REAL (dlclose)(handle);

  /* Don't call update if dlclose failed: preserve dlerror() */
  if (!ret && !CHCK_REENTRANCE)
    {
      PUSH_REENTRANCE;
      update_map_segments (hrt, 1);
      POP_REENTRANCE;
    }
  TprintfT (DBG_LT2, "__collector_dlclose(%p) returning %d\n", handle, ret);
  return ret;
}