/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */
#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "syscalltbl.h"
#include "rb_resort.h"

#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif
struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map  *sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	double			duration_filter;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			not_ev_qualifier;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			vfs_getname;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
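
/*
 * For reference, a hand-expanded sketch of what the token pasting above
 * produces, here for TP_UINT_FIELD(16):
 *
 *	static u64 tp_field__u16(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u16 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * The __SWAPPED variants differ only in returning bswap_16(value) etc., for
 * perf.data files recorded on a host of the opposite endianness.
 */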
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}
static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}
static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}
static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}
struct syscall_tp {
	struct tp_field id;
	struct tp_field args, ret;
};
static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
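
/*
 * Minimal usage sketch: for a raw_syscalls:sys_enter evsel whose ->priv is a
 * struct syscall_tp, the macro stringifies the member name into the
 * tracepoint field name, so
 *
 *	perf_evsel__init_sc_tp_uint_field(evsel, id);
 *
 * binds sc->id to the "id" field of that tracepoint's format.
 */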
static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}
static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			return -EINVAL;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
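
/*
 * Usage sketch for the accessors above, as used by the sys_enter/sys_exit
 * handlers further down:
 *
 *	int id	   = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * i.e. each read dispatches through whichever tp_field__u{8,16,32,64} (or
 * byte-swapped) callback was installed for that field at init time.
 */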
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
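
/*
 * Example, using the strarray__itimers table defined further below (offset 0,
 * prefix "ITIMER_"): val=0 prints "ITIMER_REAL" when show_prefix is true and
 * just "REAL" otherwise, while an out-of-range val such as 7 falls back to
 * the intfmt plus an "ITIMER_???" annotation.
 */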
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

#define SCA_STRARRAY_FLAGS syscall_arg__scnprintf_strarray_flags
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#define SCA_STRARRAYS syscall_arg__scnprintf_strarrays

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif
static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

#define SCA_HEX syscall_arg__scnprintf_hex

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

#define SCA_PTR syscall_arg__scnprintf_ptr
size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

#define SCA_INT syscall_arg__scnprintf_int

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

#define SCA_LONG syscall_arg__scnprintf_long
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }
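
/*
 * So, for instance, STRARRAY(cmd, bpf_cmd) in the syscall_fmts table below
 * expands to:
 *
 *	{ .scnprintf = SCA_STRARRAY, .parm = &strarray__bpf_cmd, }
 *
 * i.e. that argument gets printed via syscall_arg__scnprintf_strarray() with
 * the DEFINE_STRARRAY-generated table as its parameter.
 */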
#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};
static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,	   /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name	    = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name	    = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * FIXME: Make this available to all arches.
	 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name     = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
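
/*
 * Putting the table and the beautifiers together, a typical formatted line
 * (output is illustrative only, e.g. from something like
 * 'perf trace cat /etc/passwd') looks like:
 *
 *	openat(dfd: CWD, filename: "/etc/passwd", flags: RDONLY) = 3
 *
 * where SCA_FDAT rendered dfd, the vfs_getname/augmented machinery supplied
 * the filename string and SCA_OPEN_FLAGS decoded the flags.
 */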
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}
/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated.
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};
/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};
/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what the duration of a syscall is, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}
static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}
static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}
static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}
/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}
static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}
static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}
static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}
static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}
static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *field, *last_field = NULL;
	int idx = 0, len;

	for (field = sc->args; field; field = field->next, ++idx) {
		last_field = field;

		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			sc->arg_fmt[idx].scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 */
			sc->arg_fmt[idx].scnprintf = SCA_FD;
		}
	}

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}

	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return 0;

	if (name == NULL) {
		sc->nonexistent = true;
		return 0;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	if (IS_ERR(sc->tp_format))
		return PTR_ERR(sc->tp_format);

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check and discard the first variable, '__syscall_nr'
	 * or 'nr', that holds the syscall number; it is needless here.
	 * So drop the '__syscall_nr' or 'nr' field (the latter is what older
	 * kernels use).
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}
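
/*
 * I.e. for a qualifier list like 'open*,close' the entries array ends up
 * holding the ids of open, openat, etc. plus close, with not_ev_qualifier
 * false; a negated list (the '!' syntax on the command line) sets
 * not_ev_qualifier and inverts the result of the bsearch() above.
 * (Example invocations are illustrative.)
 */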
/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}
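
/*
 * Sketch of the layout being indexed, assuming a 64-bit target: raw_data
 * starts right after the u32 raw_size, so it may be only 4-byte aligned and
 * arg->args + sizeof(unsigned long) * idx can land on a non-8-byte boundary.
 * A direct *(unsigned long *) load there would be an unaligned access; the
 * memcpy() lets the compiler pick loads that are safe for the architecture.
 */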
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}
/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as mount's 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);

	return val;
}
static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);

			/*
			 * Suppress this argument if its value is zero and we
			 * don't have a string associated in a strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct evsel *evsel, int id)
{
	int err = 0;

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
 		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	err = -EINVAL;

	if (id > trace->sctbl->syscalls.max_id)
		goto out_cant_read;

	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
	    (err = trace__read_syscall_info(trace, id)) != 0)
		goto out_cant_read;

	if (trace->syscalls.table[id].name == NULL) {
		if (trace->syscalls.table[id].nonexistent)
			return NULL;
		goto out_cant_read;
	}

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		char sbuf[STRERR_BUFSIZE];
		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;
		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}
static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   perf_evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start, and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}
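
/*
 * Sketch of the payload walked above, assuming the augmented_raw_syscalls.c
 * BPF program (layout reproduced here only for illustration):
 *
 *	sample->raw_data: [ sys_enter: common fields + id + 6 args ]
 *	                  [ augmented part, e.g. for openat the filename as a
 *	                    size-prefixed string (struct augmented_arg) ]
 *
 * so anything past args_size, when present, is the augmented payload.
 */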
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments.
	 * This breaks syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, and we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
	 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
			evsel->core.attr.sample_max_stack :
			trace->max_stack;
	int err;

	if (machine__resolve(trace->host, &al, sample) < 0)
		return -1;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
	addr_location__put(&al);
	return err;
}
2074 static int trace__fprintf_callchain(struct trace
*trace
, struct perf_sample
*sample
)
2076 /* TODO: user-configurable print_opts */
2077 const unsigned int print_opts
= EVSEL__PRINT_SYM
|
2079 EVSEL__PRINT_UNKNOWN_AS_ADDR
;
2081 return sample__fprintf_callchain(sample
, 38, print_opts
, &callchain_cursor
, symbol_conf
.bt_stop_list
, trace
->output
);
2084 static const char *errno_to_name(struct evsel
*evsel
, int err
)
2086 struct perf_env
*env
= perf_evsel__env(evsel
);
2087 const char *arch_name
= perf_env__arch(env
);
2089 return arch_syscalls__strerrno(arch_name
, err
);
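/*
 * E.g. errno_to_name(evsel, ENOENT) yields "ENOENT" for the evsel's
 * architecture; trace__sys_exit() below combines it with str_error_r() to
 * print "-1 ENOENT (No such file or directory)" for failed syscalls.
 */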
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
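/*
 * A sketch of the return beautifiers chosen above (values are illustrative):
 *
 *   openat(...) = 3                                      default %ld
 *   openat(...) = -1 ENOENT (No such file or directory)  ret < 0
 *   poll(...)   = 0 (Timeout)                            sc->fmt->timeout
 *   mmap(...)   = 0x7f63254c1000                         sc->fmt->hexret
 *   wait4(...)  = 1234 (bash)                            sc->fmt->errpid
 */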
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
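/*
 * E.g. for open(): trace__sys_enter() printed the pathname argument as a
 * raw pointer and recorded where it sits in ttrace->entry_str
 * (filename.entry_str_pos); when the probe:vfs_getname event for the same
 * thread arrives, the string the kprobe collected is spliced in at that
 * position, so the final line shows the pathname instead of the pointer.
 */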
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
	       evsel->name,
	       perf_evsel__strval(evsel, sample, "comm"),
	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
	       runtime,
	       perf_evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}

static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}
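/*
 * Used for PERF_COUNT_SW_BPF_OUTPUT evsels: dumps sample->raw_data 8 bytes
 * per line, with printable bytes shown as-is and everything else as '.',
 * as implemented by bpf_output__printer() above.
 */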
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "(         ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * the thread should never happen, but if it does...
		 * fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			event_format__fprintf(evsel->tp_format, sample->cpu,
					      sample->raw_data, sample->raw_size,
					      trace->output);
			++trace->nr_events_printed;

			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
				evsel__disable(evsel);
				evsel__close(evsel);
			}
		}
	}

newline:
	fprintf(trace->output, "\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	thread__put(thread);
	return 0;
}

static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{

	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}

static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static void trace__set_base_time(struct trace *trace,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
}

static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;
	int err = 0;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))
		goto out;

	trace__set_base_time(trace, evsel, sample);

	if (handler) {
		++trace->nr_events;
		handler(trace, evsel, event, sample);
	}
out:
	thread__put(thread);
	return err;
}

static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);

	/* +1 is for the event string below */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
		majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			free(rec_argv);
			return -1;
		}
	}

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	return cmd_record(j, rec_argv);
}
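/*
 * For example (a sketch, the exact event list depends on the kernel),
 * 'perf trace record -- sleep 1' builds an argv equivalent to:
 *
 *   perf record -R -m 1024 -c 1 \
 *	-e raw_syscalls:sys_enter,raw_syscalls:sys_exit -- sleep 1
 *
 * with the major/minor fault software events appended when -F/--pf was
 * also given.
 */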
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);

static bool evlist__add_vfs_getname(struct evlist *evlist)
{
	bool found = false;
	struct evsel *evsel, *tmp;
	struct parse_events_error err = { .idx = 0, };
	int ret = parse_events(evlist, "probe:vfs_getname*", &err);

	if (ret)
		return false;

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
			continue;

		if (perf_evsel__field(evsel, "pathname")) {
			evsel->handler = trace__vfs_getname;
			found = true;
			continue;
		}

		list_del_init(&evsel->core.node);
		evsel->evlist = NULL;
		evsel__delete(evsel);
	}

	return found;
}

static struct evsel *perf_evsel__new_pgfault(u64 config)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.mmap_data = 1,
	};

	attr.config = config;
	attr.sample_period = 1;

	event_attr_init(&attr);

	evsel = evsel__new(&attr);
	if (evsel)
		evsel->handler = trace__pgfault;

	return evsel;
}
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
	const u32 type = event->header.type;
	struct evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);
		return;
	}

	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
		return;
	}

	if (evswitch__discard(&trace->evswitch, evsel))
		return;

	trace__set_base_time(trace, evsel, sample);

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
		       perf_evsel__name(evsel), sample->tid,
		       sample->cpu, sample->raw_size);
	} else {
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
	}

	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit  = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}

static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
	int err = -1;
	struct evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
					  filter)) {
		sys_exit = trace->syscalls.events.sys_exit;
		err = perf_evsel__append_tp_filter(sys_exit, filter);
	}

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}
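/*
 * E.g. with 'perf trace -e open*,close' the qualifier ids turn into a
 * tracepoint filter string along the lines of "id == 2 || id == 3 ||
 * id == 257" (actual numbers are per-architecture), appended to both the
 * sys_enter and sys_exit events.
 */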
#ifdef HAVE_LIBBPF_SUPPORT
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_program_by_title(trace->bpf_obj, name);
}

static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	if (prog != NULL) {
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);

out_unaugmented:
	return trace->syscalls.unaugmented_prog;
}
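/*
 * Naming sketch: for the "openat" syscall and type "enter" the default
 * section looked up is "!syscalls:sys_enter_openat"; the '!' prefix is
 * assumed here to mark sections that perf's BPF loader must not
 * auto-attach, leaving them to be tail-called via the prog array only.
 */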
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);

	if (sc == NULL)
		return;

	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
}

static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	int arg = 0;

	if (sc == NULL)
		goto out;

	for (; arg < sc->nr_args; ++arg) {
		entry->string_args_len[arg] = 0;
		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
			/* Should be set like strace -s strsize */
			entry->string_args_len[arg] = PATH_MAX;
		}
	}
out:
	for (; arg < 6; ++arg)
		entry->string_args_len[arg] = 0;
}
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = !trace->not_ev_qualifier,
	};
	int err = 0;
	size_t i;

	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
		int key = trace->ev_qualifier_ids.entries[i];

		if (value.enabled) {
			trace__init_bpf_map_syscall_args(trace, key, &value);
			trace__init_syscall_bpf_progs(trace, key);
		}

		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
		if (err)
			break;
	}

	return err;
}

static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = enabled,
	};
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		if (enabled)
			trace__init_bpf_map_syscall_args(trace, key, &value);

		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

static int trace__init_syscalls_bpf_map(struct trace *trace)
{
	bool enabled = true;

	if (trace->ev_qualifier_ids.nr)
		enabled = trace->not_ev_qualifier;

	return __trace__init_syscalls_bpf_map(trace, enabled);
}
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate doesn't copy our pointer arg, but might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->syscalls.unaugmented_prog)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
next_candidate:
		continue;
	}

	return NULL;
}
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now let's do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * I.e. that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * Because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call), then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
	return 0;
}

static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
{
	return 0;
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
							    const char *name __maybe_unused)
{
	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	if (trace->syscalls.map)
		return trace__set_ev_qualifier_bpf_filter(trace);
	if (trace->syscalls.events.sys_enter)
		return trace__set_ev_qualifier_tp_filter(trace);
	return 0;
}

static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
{
	int err = 0;
#ifdef HAVE_LIBBPF_SUPPORT
	bool value = true;
	int map_fd = bpf_map__fd(map);
	size_t i;

	for (i = 0; i < npids; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}
#endif
	return err;
}

static int trace__set_filter_loop_pids(struct trace *trace)
{
	unsigned int nr = 1, err;
	pid_t pids[32] = {
		getpid(),
	};
	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);

	while (thread && nr < ARRAY_SIZE(pids)) {
		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);

		if (parent == NULL)
			break;

		if (!strcmp(thread__comm_str(parent), "sshd") ||
		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
			pids[nr++] = parent->tid;
			break;
		}
		thread = parent;
	}

	err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
	if (!err && trace->filter_pids.map)
		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);

	return err;
}
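/*
 * Rationale for the loop above: when the terminal or ssh session showing
 * the trace is itself traced, every line we print makes sshd or
 * gnome-terminal do more syscalls, which we then print, ad infinitum;
 * filtering out those ancestor pids (plus our own) breaks the feedback
 * loop.
 */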
static int trace__set_filter_pids(struct trace *trace)
{
	int err = 0;
	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0) {
		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
						      trace->filter_pids.entries);
		if (!err && trace->filter_pids.map) {
			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
						       trace->filter_pids.entries);
		}
	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
		err = trace__set_filter_loop_pids(trace);
	}

	return err;
}

static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
	struct evlist *evlist = trace->evlist;
	struct perf_sample sample;
	int err = perf_evlist__parse_sample(evlist, event, &sample);

	if (err)
		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
	else
		trace__handle_event(trace, event, &sample);

	return 0;
}

static int __trace__flush_events(struct trace *trace)
{
	u64 first = ordered_events__first_time(&trace->oe.data);
	u64 flush = trace->oe.last - NSEC_PER_SEC;

	/* Is there something to flush? */
	if (first && first < flush)
		return ordered_events__flush_time(&trace->oe.data, flush);

	return 0;
}

static int trace__flush_events(struct trace *trace)
{
	return !trace->sort_events ? 0 : __trace__flush_events(trace);
}

static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
	if (err)
		return err;

	return trace__flush_events(trace);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 *	trace -G A -e sched:*switch
	 *
	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 *	trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 *	trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts, &callchain_param);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		       errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.map)
		trace__init_syscalls_bpf_map(trace);

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
	 * fd->pathname table and were ending up showing the last value set by
	 * syscalls opening a pathname and associating it with a descriptor or
	 * reading it from /proc/pid/fd/ in cases where that doesn't make
	 * sense.
	 *
	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 * not in use.
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		evlist__enable(evlist);
	}

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
				  evlist->core.threads->nr > 1 ||
				  evlist__first(evlist)->core.attr.inherit;

	/*
	 * Now that we already used evsel->core.attr to ask the kernel to setup the
	 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
	 * trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__has_callchain(evsel) &&
		    evsel->core.attr.sample_max_stack == 0)
			evsel->core.attr.sample_max_stack = trace->max_stack;
	}
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		union perf_event *event;
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(md);

			if (interrupted)
				goto out_disable;

			if (done && !draining) {
				evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(md);
	}

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && evlist__poll(evlist, timeout) > 0) {
			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);

	evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname",	     trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, false, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialization of the raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialization of the raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed  = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}

DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct stats	*stats;
	double		msecs;
	int		syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats   = stats;
	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls     total       min       avg       max      stddev\n");
	printed += fprintf(fp, "                                (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- --------  --------- --------- --------- ---------     ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64)stats->n;

			pct = avg ? 100.0 * stddev_stats(stats) / avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
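/*
 * Sketch of the table emitted above (the numbers are made up purely for
 * illustration):
 *
 *   syscall            calls     total       min       avg       max      stddev
 *                                (msec)    (msec)    (msec)    (msec)        (%)
 *   --------------- --------  --------- --------- --------- ---------     ------
 *   read                  10      0.350     0.010     0.035     0.120     30.21%
 */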
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}

static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}

DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
					      int unset __maybe_unused)
{
	int ret = -1;
	size_t i;
	struct trace *trace = opt->value;
	/*
	 * FIXME: introduce an intarray class, plain parse csv and create a
	 * { int nr, int entries[] } struct...
	 */
	struct intlist *list = intlist__new(str);

	if (list == NULL)
		return -1;

	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
	trace->filter_pids.entries = calloc(i, sizeof(pid_t));

	if (trace->filter_pids.entries == NULL)
		goto out;

	trace->filter_pids.entries[0] = getpid();

	for (i = 1; i < trace->filter_pids.nr; ++i)
		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;

	ret = 0;
out:
	intlist__delete(list);
	return ret;
}
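/*
 * E.g. 'perf trace --filter-pids 1084,2932' ends up with entries
 * { getpid(), 1084, 2932 }: the tracer always filters itself out in
 * addition to the user-supplied CSV list.
 */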
static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
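/*
 * E.g. 'perf trace -o trace.txt ...' writes to trace.txt, first renaming
 * any pre-existing non-empty trace.txt to trace.txt.old, mirroring what
 * 'perf record' does with perf.data.
 */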
static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}
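/*
 * E.g. 'perf trace -F maj' sets TRACE_PFMAJ, 'perf trace -F' alone uses
 * the OPT_CALLBACK_DEFAULT "maj" argument further down in cmd_trace(),
 * and 'perf trace -F all' traces both major and minor faults.
 */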
static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
}

static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls"))
			continue;

		if (perf_evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;
		}
	}

	return 0;
}
/*
 * XXX: Hackish, just splitting the combined -e+--event (syscalls
 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc) to use
 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
 *
 * It'd be better to introduce a parse_options() variant that would return a
 * list with the terms it didn't match to an event...
 */
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	if (sep)
		*sep = ',';

	return err;
}
static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
	struct trace *trace = opt->value;

	if (!list_empty(&trace->evlist->core.entries))
		return parse_cgroups(opt, str, unset);

	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);

	return 0;
}

static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_map_by_name(trace->bpf_obj, name);
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}

static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		/*
		 * We can't propagate parse_event_option() return, as it is 1
		 * for failure while perf_config() expects -1.
		 */
		if (parse_events_option(&o, value, 0))
			err = -1;
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);
		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	}
out:
	return err;
}
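/*
 * These knobs map to a [trace] section in ~/.perfconfig, e.g.:
 *
 *	[trace]
 *		show_timestamp = no
 *		args_alignment = 40
 *		add_events = probe:vfs_getname
 */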
4069 int cmd_trace(int argc
, const char **argv
)
4071 const char *trace_usage
[] = {
4072 "perf trace [<options>] [<command>]",
4073 "perf trace [<options>] -- <command> [<options>]",
4074 "perf trace record [<options>] [<command>]",
4075 "perf trace record [<options>] -- <command> [<options>]",
4078 struct trace trace
= {
4084 .user_freq
= UINT_MAX
,
4085 .user_interval
= ULLONG_MAX
,
4086 .no_buffering
= true,
4087 .mmap_pages
= UINT_MAX
,
4091 .show_tstamp
= true,
4092 .show_duration
= true,
4093 .show_arg_names
= true,
4094 .args_alignment
= 70,
4095 .trace_syscalls
= false,
4096 .kernel_syscallchains
= false,
4097 .max_stack
= UINT_MAX
,
4098 .max_events
= ULONG_MAX
,
4100 const char *map_dump_str
= NULL
;
4101 const char *output_name
= NULL
;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
			     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		  "Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];
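	/*
	 * max_stack_user_set/mmap_pages_user_set above start out true and are
	 * cleared below when the corresponding field still holds its UINT_MAX
	 * sentinel, i.e. when no --max-stack/--mmap-pages was given.
	 */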
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);

	trace.evlist = evlist__new();
	trace.sctbl = syscalltbl__new();
	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();
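	/*
	 * Read the trace.* settings from the perfconfig file(s) first, so
	 * that the command line, parsed below, can override them.
	 */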
	err = perf_config(trace__config, &trace);
	if (err)
		goto out;
	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
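	/*
	 * PARSE_OPT_STOP_AT_NON_OPTION stops at the first non-option, so
	 * whatever remains in argv[] is the workload (or the "record"
	 * subcommand) handled further down.
	 */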
	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}
	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}
		trace.bpf_obj = evsel->bpf_obj;

		trace__set_bpf_map_filtered_pids(&trace);
		trace__set_bpf_map_syscalls(&trace);
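		/*
		 * Per syscall augmenters live in the sys_enter/sys_exit
		 * BPF_MAP_TYPE_PROG_ARRAY maps set up above; syscalls without
		 * a specialized augmenter fall back to the "unaugmented"
		 * program looked up below.
		 */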
		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
	}

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}

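	/*
	 * Page fault tracing needs the faulting address (PERF_SAMPLE_ADDR)
	 * and a timestamp (PERF_SAMPLE_TIME) in each sample.
	 */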
	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;
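	/*
	 * When replaying from a perf.data file (-i) the local
	 * kernel.perf_event_max_stack sysctl doesn't apply, so default to the
	 * compile time maximum instead.
	 */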
	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif
	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;

				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = evsel->priv;
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
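				/*
				 * I.e. the fixed raw_syscalls:sys_enter
				 * payload: the common fields up to 'id'
				 * (sc->id.offset) plus syscall_nr and the
				 * 6 args, 7 longs = 56 bytes on 64-bit;
				 * augmented (pointer) payloads follow it.
				 */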
				perf_evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

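	/*
	 * 'perf trace record' is implemented on top of 'perf record': hand it
	 * the rest of the command line with the raw_syscalls events added.
	 */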
	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;
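	/*
	 * No events asked for via -e/--expr and no --pf: default to the
	 * classic strace-like syscall mode.
	 */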
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;
	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}
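	/*
	 * Neither a workload on the command line nor a target
	 * (pid/tid/cpu/uid) specified: trace the whole system.
	 */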
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;
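	/*
	 * With -i process an existing perf.data file, otherwise trace live.
	 */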
	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);
out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}