4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
16 * Released under the GPL v2. (and only v2, not any later version)
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
22 #include "util/color.h"
23 #include "util/debug.h"
24 #include "util/evlist.h"
25 #include <subcmd/exec-cmd.h>
26 #include "util/machine.h"
27 #include "util/session.h"
28 #include "util/thread.h"
29 #include <subcmd/parse-options.h>
30 #include "util/strlist.h"
31 #include "util/intlist.h"
32 #include "util/thread_map.h"
33 #include "util/stat.h"
34 #include "trace-event.h"
35 #include "util/parse-events.h"
36 #include "util/bpf-loader.h"
37 #include "callchain.h"
38 #include "syscalltbl.h"
39 #include "rb_resort.h"
41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
43 #include <linux/err.h>
44 #include <linux/filter.h>
45 #include <linux/audit.h>
46 #include <linux/random.h>
47 #include <linux/stringify.h>
48 #include <linux/time64.h>
51 # define O_CLOEXEC 02000000
55 struct perf_tool tool
;
56 struct syscalltbl
*sctbl
;
59 struct syscall
*table
;
61 struct perf_evsel
*sys_enter
,
65 struct record_opts opts
;
66 struct perf_evlist
*evlist
;
68 struct thread
*current
;
71 unsigned long nr_events
;
72 struct strlist
*ev_qualifier
;
81 double duration_filter
;
87 unsigned int max_stack
;
88 unsigned int min_stack
;
89 bool not_ev_qualifier
;
93 bool multiple_threads
;
99 bool kernel_syscallchains
;
109 u64 (*integer
)(struct tp_field
*field
, struct perf_sample
*sample
);
110 void *(*pointer
)(struct tp_field
*field
, struct perf_sample
*sample
);
114 #define TP_UINT_FIELD(bits) \
115 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
118 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
127 #define TP_UINT_FIELD__SWAPPED(bits) \
128 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
131 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
132 return bswap_##bits(value);\
135 TP_UINT_FIELD__SWAPPED(16);
136 TP_UINT_FIELD__SWAPPED(32);
137 TP_UINT_FIELD__SWAPPED(64);
139 static int tp_field__init_uint(struct tp_field
*field
,
140 struct format_field
*format_field
,
143 field
->offset
= format_field
->offset
;
145 switch (format_field
->size
) {
147 field
->integer
= tp_field__u8
;
150 field
->integer
= needs_swap
? tp_field__swapped_u16
: tp_field__u16
;
153 field
->integer
= needs_swap
? tp_field__swapped_u32
: tp_field__u32
;
156 field
->integer
= needs_swap
? tp_field__swapped_u64
: tp_field__u64
;
165 static void *tp_field__ptr(struct tp_field
*field
, struct perf_sample
*sample
)
167 return sample
->raw_data
+ field
->offset
;
170 static int tp_field__init_ptr(struct tp_field
*field
, struct format_field
*format_field
)
172 field
->offset
= format_field
->offset
;
173 field
->pointer
= tp_field__ptr
;
180 struct tp_field args
, ret
;
184 static int perf_evsel__init_tp_uint_field(struct perf_evsel
*evsel
,
185 struct tp_field
*field
,
188 struct format_field
*format_field
= perf_evsel__field(evsel
, name
);
190 if (format_field
== NULL
)
193 return tp_field__init_uint(field
, format_field
, evsel
->needs_swap
);
196 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
197 ({ struct syscall_tp *sc = evsel->priv;\
198 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
200 static int perf_evsel__init_tp_ptr_field(struct perf_evsel
*evsel
,
201 struct tp_field
*field
,
204 struct format_field
*format_field
= perf_evsel__field(evsel
, name
);
206 if (format_field
== NULL
)
209 return tp_field__init_ptr(field
, format_field
);
212 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
213 ({ struct syscall_tp *sc = evsel->priv;\
214 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
216 static void perf_evsel__delete_priv(struct perf_evsel
*evsel
)
219 perf_evsel__delete(evsel
);
222 static int perf_evsel__init_syscall_tp(struct perf_evsel
*evsel
, void *handler
)
224 evsel
->priv
= malloc(sizeof(struct syscall_tp
));
225 if (evsel
->priv
!= NULL
) {
226 if (perf_evsel__init_sc_tp_uint_field(evsel
, id
))
229 evsel
->handler
= handler
;
240 static struct perf_evsel
*perf_evsel__syscall_newtp(const char *direction
, void *handler
)
242 struct perf_evsel
*evsel
= perf_evsel__newtp("raw_syscalls", direction
);
244 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
246 evsel
= perf_evsel__newtp("syscalls", direction
);
251 if (perf_evsel__init_syscall_tp(evsel
, handler
))
257 perf_evsel__delete_priv(evsel
);
261 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
262 ({ struct syscall_tp *fields = evsel->priv; \
263 fields->name.integer(&fields->name, sample); })
265 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
266 ({ struct syscall_tp *fields = evsel->priv; \
267 fields->name.pointer(&fields->name, sample); })
271 struct thread
*thread
;
281 const char **entries
;
284 #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
285 .nr_entries = ARRAY_SIZE(array), \
289 #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
291 .nr_entries = ARRAY_SIZE(array), \
295 static size_t __syscall_arg__scnprintf_strarray(char *bf
, size_t size
,
297 struct syscall_arg
*arg
)
299 struct strarray
*sa
= arg
->parm
;
300 int idx
= arg
->val
- sa
->offset
;
302 if (idx
< 0 || idx
>= sa
->nr_entries
)
303 return scnprintf(bf
, size
, intfmt
, arg
->val
);
305 return scnprintf(bf
, size
, "%s", sa
->entries
[idx
]);
/* Decimal-fallback wrapper around the generic strarray beautifier. */
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}
314 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
316 #if defined(__i386__) || defined(__x86_64__)
318 * FIXME: Make this available to all arches as soon as the ioctl beautifier
319 * gets rewritten to support all arches.
/* Hex-fallback wrapper around the generic strarray beautifier (ioctl cmds). */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
}
327 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
328 #endif /* defined(__i386__) || defined(__x86_64__) */
330 static size_t syscall_arg__scnprintf_fd(char *bf
, size_t size
,
331 struct syscall_arg
*arg
);
333 #define SCA_FD syscall_arg__scnprintf_fd
336 #define AT_FDCWD -100
339 static size_t syscall_arg__scnprintf_fd_at(char *bf
, size_t size
,
340 struct syscall_arg
*arg
)
345 return scnprintf(bf
, size
, "CWD");
347 return syscall_arg__scnprintf_fd(bf
, size
, arg
);
350 #define SCA_FDAT syscall_arg__scnprintf_fd_at
352 static size_t syscall_arg__scnprintf_close_fd(char *bf
, size_t size
,
353 struct syscall_arg
*arg
);
355 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
357 static size_t syscall_arg__scnprintf_hex(char *bf
, size_t size
,
358 struct syscall_arg
*arg
)
360 return scnprintf(bf
, size
, "%#lx", arg
->val
);
363 #define SCA_HEX syscall_arg__scnprintf_hex
365 static size_t syscall_arg__scnprintf_int(char *bf
, size_t size
,
366 struct syscall_arg
*arg
)
368 return scnprintf(bf
, size
, "%d", arg
->val
);
371 #define SCA_INT syscall_arg__scnprintf_int
373 static const char *bpf_cmd
[] = {
374 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
375 "MAP_GET_NEXT_KEY", "PROG_LOAD",
377 static DEFINE_STRARRAY(bpf_cmd
);
379 static const char *epoll_ctl_ops
[] = { "ADD", "DEL", "MOD", };
380 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops
, 1);
382 static const char *itimers
[] = { "REAL", "VIRTUAL", "PROF", };
383 static DEFINE_STRARRAY(itimers
);
385 static const char *keyctl_options
[] = {
386 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
387 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
388 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
389 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
390 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
392 static DEFINE_STRARRAY(keyctl_options
);
394 static const char *whences
[] = { "SET", "CUR", "END",
402 static DEFINE_STRARRAY(whences
);
404 static const char *fcntl_cmds
[] = {
405 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
406 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
407 "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
410 static DEFINE_STRARRAY(fcntl_cmds
);
412 static const char *rlimit_resources
[] = {
413 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
414 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
417 static DEFINE_STRARRAY(rlimit_resources
);
419 static const char *sighow
[] = { "BLOCK", "UNBLOCK", "SETMASK", };
420 static DEFINE_STRARRAY(sighow
);
422 static const char *clockid
[] = {
423 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
424 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
425 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
427 static DEFINE_STRARRAY(clockid
);
429 static const char *socket_families
[] = {
430 "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
431 "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
432 "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
433 "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
434 "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
435 "ALG", "NFC", "VSOCK",
437 static DEFINE_STRARRAY(socket_families
);
439 static size_t syscall_arg__scnprintf_access_mode(char *bf
, size_t size
,
440 struct syscall_arg
*arg
)
445 if (mode
== F_OK
) /* 0 */
446 return scnprintf(bf
, size
, "F");
448 if (mode & n##_OK) { \
449 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
459 printed
+= scnprintf(bf
+ printed
, size
- printed
, "|%#x", mode
);
464 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
466 static size_t syscall_arg__scnprintf_filename(char *bf
, size_t size
,
467 struct syscall_arg
*arg
);
469 #define SCA_FILENAME syscall_arg__scnprintf_filename
471 static size_t syscall_arg__scnprintf_pipe_flags(char *bf
, size_t size
,
472 struct syscall_arg
*arg
)
474 int printed
= 0, flags
= arg
->val
;
477 if (flags & O_##n) { \
478 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
487 printed
+= scnprintf(bf
+ printed
, size
- printed
, "%s%#x", printed
? "|" : "", flags
);
492 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
494 #if defined(__i386__) || defined(__x86_64__)
496 * FIXME: Make this available to all arches.
498 #define TCGETS 0x5401
500 static const char *tioctls
[] = {
501 "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
502 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
503 "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
504 "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
505 "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
506 "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
507 "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
508 "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
509 "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
510 "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
511 "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
512 [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
513 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
514 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
515 "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
518 static DEFINE_STRARRAY_OFFSET(tioctls
, 0x5401);
519 #endif /* defined(__i386__) || defined(__x86_64__) */
521 #ifndef GRND_NONBLOCK
522 #define GRND_NONBLOCK 0x0001
525 #define GRND_RANDOM 0x0002
528 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf
, size_t size
,
529 struct syscall_arg
*arg
)
531 int printed
= 0, flags
= arg
->val
;
534 if (flags & GRND_##n) { \
535 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
536 flags &= ~GRND_##n; \
544 printed
+= scnprintf(bf
+ printed
, size
- printed
, "%s%#x", printed
? "|" : "", flags
);
549 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
551 #define STRARRAY(arg, name, array) \
552 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
553 .arg_parm = { [arg] = &strarray__##array, }
555 #include "trace/beauty/eventfd.c"
556 #include "trace/beauty/flock.c"
557 #include "trace/beauty/futex_op.c"
558 #include "trace/beauty/mmap.c"
559 #include "trace/beauty/mode_t.c"
560 #include "trace/beauty/msg_flags.c"
561 #include "trace/beauty/open_flags.c"
562 #include "trace/beauty/perf_event_open.c"
563 #include "trace/beauty/pid.c"
564 #include "trace/beauty/sched_policy.c"
565 #include "trace/beauty/seccomp.c"
566 #include "trace/beauty/signum.c"
567 #include "trace/beauty/socket_type.c"
568 #include "trace/beauty/waitid_options.c"
570 static struct syscall_fmt
{
573 size_t (*arg_scnprintf
[6])(char *bf
, size_t size
, struct syscall_arg
*arg
);
580 { .name
= "access", .errmsg
= true,
581 .arg_scnprintf
= { [1] = SCA_ACCMODE
, /* mode */ }, },
582 { .name
= "arch_prctl", .errmsg
= true, .alias
= "prctl", },
583 { .name
= "bpf", .errmsg
= true, STRARRAY(0, cmd
, bpf_cmd
), },
584 { .name
= "brk", .hexret
= true,
585 .arg_scnprintf
= { [0] = SCA_HEX
, /* brk */ }, },
586 { .name
= "chdir", .errmsg
= true, },
587 { .name
= "chmod", .errmsg
= true, },
588 { .name
= "chroot", .errmsg
= true, },
589 { .name
= "clock_gettime", .errmsg
= true, STRARRAY(0, clk_id
, clockid
), },
590 { .name
= "clone", .errpid
= true, },
591 { .name
= "close", .errmsg
= true,
592 .arg_scnprintf
= { [0] = SCA_CLOSE_FD
, /* fd */ }, },
593 { .name
= "connect", .errmsg
= true, },
594 { .name
= "creat", .errmsg
= true, },
595 { .name
= "dup", .errmsg
= true, },
596 { .name
= "dup2", .errmsg
= true, },
597 { .name
= "dup3", .errmsg
= true, },
598 { .name
= "epoll_ctl", .errmsg
= true, STRARRAY(1, op
, epoll_ctl_ops
), },
599 { .name
= "eventfd2", .errmsg
= true,
600 .arg_scnprintf
= { [1] = SCA_EFD_FLAGS
, /* flags */ }, },
601 { .name
= "faccessat", .errmsg
= true, },
602 { .name
= "fadvise64", .errmsg
= true, },
603 { .name
= "fallocate", .errmsg
= true, },
604 { .name
= "fchdir", .errmsg
= true, },
605 { .name
= "fchmod", .errmsg
= true, },
606 { .name
= "fchmodat", .errmsg
= true,
607 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
608 { .name
= "fchown", .errmsg
= true, },
609 { .name
= "fchownat", .errmsg
= true,
610 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
611 { .name
= "fcntl", .errmsg
= true,
612 .arg_scnprintf
= { [1] = SCA_STRARRAY
, /* cmd */ },
613 .arg_parm
= { [1] = &strarray__fcntl_cmds
, /* cmd */ }, },
614 { .name
= "fdatasync", .errmsg
= true, },
615 { .name
= "flock", .errmsg
= true,
616 .arg_scnprintf
= { [1] = SCA_FLOCK
, /* cmd */ }, },
617 { .name
= "fsetxattr", .errmsg
= true, },
618 { .name
= "fstat", .errmsg
= true, .alias
= "newfstat", },
619 { .name
= "fstatat", .errmsg
= true, .alias
= "newfstatat", },
620 { .name
= "fstatfs", .errmsg
= true, },
621 { .name
= "fsync", .errmsg
= true, },
622 { .name
= "ftruncate", .errmsg
= true, },
623 { .name
= "futex", .errmsg
= true,
624 .arg_scnprintf
= { [1] = SCA_FUTEX_OP
, /* op */ }, },
625 { .name
= "futimesat", .errmsg
= true,
626 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
627 { .name
= "getdents", .errmsg
= true, },
628 { .name
= "getdents64", .errmsg
= true, },
629 { .name
= "getitimer", .errmsg
= true, STRARRAY(0, which
, itimers
), },
630 { .name
= "getpid", .errpid
= true, },
631 { .name
= "getpgid", .errpid
= true, },
632 { .name
= "getppid", .errpid
= true, },
633 { .name
= "getrandom", .errmsg
= true,
634 .arg_scnprintf
= { [2] = SCA_GETRANDOM_FLAGS
, /* flags */ }, },
635 { .name
= "getrlimit", .errmsg
= true, STRARRAY(0, resource
, rlimit_resources
), },
636 { .name
= "getxattr", .errmsg
= true, },
637 { .name
= "inotify_add_watch", .errmsg
= true, },
638 { .name
= "ioctl", .errmsg
= true,
640 #if defined(__i386__) || defined(__x86_64__)
642 * FIXME: Make this available to all arches.
644 [1] = SCA_STRHEXARRAY
, /* cmd */
645 [2] = SCA_HEX
, /* arg */ },
646 .arg_parm
= { [1] = &strarray__tioctls
, /* cmd */ }, },
648 [2] = SCA_HEX
, /* arg */ }, },
650 { .name
= "keyctl", .errmsg
= true, STRARRAY(0, option
, keyctl_options
), },
651 { .name
= "kill", .errmsg
= true,
652 .arg_scnprintf
= { [1] = SCA_SIGNUM
, /* sig */ }, },
653 { .name
= "lchown", .errmsg
= true, },
654 { .name
= "lgetxattr", .errmsg
= true, },
655 { .name
= "linkat", .errmsg
= true,
656 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
657 { .name
= "listxattr", .errmsg
= true, },
658 { .name
= "llistxattr", .errmsg
= true, },
659 { .name
= "lremovexattr", .errmsg
= true, },
660 { .name
= "lseek", .errmsg
= true,
661 .arg_scnprintf
= { [2] = SCA_STRARRAY
, /* whence */ },
662 .arg_parm
= { [2] = &strarray__whences
, /* whence */ }, },
663 { .name
= "lsetxattr", .errmsg
= true, },
664 { .name
= "lstat", .errmsg
= true, .alias
= "newlstat", },
665 { .name
= "lsxattr", .errmsg
= true, },
666 { .name
= "madvise", .errmsg
= true,
667 .arg_scnprintf
= { [0] = SCA_HEX
, /* start */
668 [2] = SCA_MADV_BHV
, /* behavior */ }, },
669 { .name
= "mkdir", .errmsg
= true, },
670 { .name
= "mkdirat", .errmsg
= true,
671 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
672 { .name
= "mknod", .errmsg
= true, },
673 { .name
= "mknodat", .errmsg
= true,
674 .arg_scnprintf
= { [0] = SCA_FDAT
, /* fd */ }, },
675 { .name
= "mlock", .errmsg
= true,
676 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */ }, },
677 { .name
= "mlockall", .errmsg
= true,
678 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */ }, },
679 { .name
= "mmap", .hexret
= true,
680 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */
681 [2] = SCA_MMAP_PROT
, /* prot */
682 [3] = SCA_MMAP_FLAGS
, /* flags */ }, },
683 { .name
= "mprotect", .errmsg
= true,
684 .arg_scnprintf
= { [0] = SCA_HEX
, /* start */
685 [2] = SCA_MMAP_PROT
, /* prot */ }, },
686 { .name
= "mq_unlink", .errmsg
= true,
687 .arg_scnprintf
= { [0] = SCA_FILENAME
, /* u_name */ }, },
688 { .name
= "mremap", .hexret
= true,
689 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */
690 [3] = SCA_MREMAP_FLAGS
, /* flags */
691 [4] = SCA_HEX
, /* new_addr */ }, },
692 { .name
= "munlock", .errmsg
= true,
693 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */ }, },
694 { .name
= "munmap", .errmsg
= true,
695 .arg_scnprintf
= { [0] = SCA_HEX
, /* addr */ }, },
696 { .name
= "name_to_handle_at", .errmsg
= true,
697 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
698 { .name
= "newfstatat", .errmsg
= true,
699 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
700 { .name
= "open", .errmsg
= true,
701 .arg_scnprintf
= { [1] = SCA_OPEN_FLAGS
, /* flags */ }, },
702 { .name
= "open_by_handle_at", .errmsg
= true,
703 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */
704 [2] = SCA_OPEN_FLAGS
, /* flags */ }, },
705 { .name
= "openat", .errmsg
= true,
706 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */
707 [2] = SCA_OPEN_FLAGS
, /* flags */ }, },
708 { .name
= "perf_event_open", .errmsg
= true,
709 .arg_scnprintf
= { [2] = SCA_INT
, /* cpu */
710 [3] = SCA_FD
, /* group_fd */
711 [4] = SCA_PERF_FLAGS
, /* flags */ }, },
712 { .name
= "pipe2", .errmsg
= true,
713 .arg_scnprintf
= { [1] = SCA_PIPE_FLAGS
, /* flags */ }, },
714 { .name
= "poll", .errmsg
= true, .timeout
= true, },
715 { .name
= "ppoll", .errmsg
= true, .timeout
= true, },
716 { .name
= "pread", .errmsg
= true, .alias
= "pread64", },
717 { .name
= "preadv", .errmsg
= true, .alias
= "pread", },
718 { .name
= "prlimit64", .errmsg
= true, STRARRAY(1, resource
, rlimit_resources
), },
719 { .name
= "pwrite", .errmsg
= true, .alias
= "pwrite64", },
720 { .name
= "pwritev", .errmsg
= true, },
721 { .name
= "read", .errmsg
= true, },
722 { .name
= "readlink", .errmsg
= true, },
723 { .name
= "readlinkat", .errmsg
= true,
724 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
725 { .name
= "readv", .errmsg
= true, },
726 { .name
= "recvfrom", .errmsg
= true,
727 .arg_scnprintf
= { [3] = SCA_MSG_FLAGS
, /* flags */ }, },
728 { .name
= "recvmmsg", .errmsg
= true,
729 .arg_scnprintf
= { [3] = SCA_MSG_FLAGS
, /* flags */ }, },
730 { .name
= "recvmsg", .errmsg
= true,
731 .arg_scnprintf
= { [2] = SCA_MSG_FLAGS
, /* flags */ }, },
732 { .name
= "removexattr", .errmsg
= true, },
733 { .name
= "renameat", .errmsg
= true,
734 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
735 { .name
= "rmdir", .errmsg
= true, },
736 { .name
= "rt_sigaction", .errmsg
= true,
737 .arg_scnprintf
= { [0] = SCA_SIGNUM
, /* sig */ }, },
738 { .name
= "rt_sigprocmask", .errmsg
= true, STRARRAY(0, how
, sighow
), },
739 { .name
= "rt_sigqueueinfo", .errmsg
= true,
740 .arg_scnprintf
= { [1] = SCA_SIGNUM
, /* sig */ }, },
741 { .name
= "rt_tgsigqueueinfo", .errmsg
= true,
742 .arg_scnprintf
= { [2] = SCA_SIGNUM
, /* sig */ }, },
743 { .name
= "sched_getattr", .errmsg
= true, },
744 { .name
= "sched_setattr", .errmsg
= true, },
745 { .name
= "sched_setscheduler", .errmsg
= true,
746 .arg_scnprintf
= { [1] = SCA_SCHED_POLICY
, /* policy */ }, },
747 { .name
= "seccomp", .errmsg
= true,
748 .arg_scnprintf
= { [0] = SCA_SECCOMP_OP
, /* op */
749 [1] = SCA_SECCOMP_FLAGS
, /* flags */ }, },
750 { .name
= "select", .errmsg
= true, .timeout
= true, },
751 { .name
= "sendmmsg", .errmsg
= true,
752 .arg_scnprintf
= { [3] = SCA_MSG_FLAGS
, /* flags */ }, },
753 { .name
= "sendmsg", .errmsg
= true,
754 .arg_scnprintf
= { [2] = SCA_MSG_FLAGS
, /* flags */ }, },
755 { .name
= "sendto", .errmsg
= true,
756 .arg_scnprintf
= { [3] = SCA_MSG_FLAGS
, /* flags */ }, },
757 { .name
= "set_tid_address", .errpid
= true, },
758 { .name
= "setitimer", .errmsg
= true, STRARRAY(0, which
, itimers
), },
759 { .name
= "setpgid", .errmsg
= true, },
760 { .name
= "setrlimit", .errmsg
= true, STRARRAY(0, resource
, rlimit_resources
), },
761 { .name
= "setxattr", .errmsg
= true, },
762 { .name
= "shutdown", .errmsg
= true, },
763 { .name
= "socket", .errmsg
= true,
764 .arg_scnprintf
= { [0] = SCA_STRARRAY
, /* family */
765 [1] = SCA_SK_TYPE
, /* type */ },
766 .arg_parm
= { [0] = &strarray__socket_families
, /* family */ }, },
767 { .name
= "socketpair", .errmsg
= true,
768 .arg_scnprintf
= { [0] = SCA_STRARRAY
, /* family */
769 [1] = SCA_SK_TYPE
, /* type */ },
770 .arg_parm
= { [0] = &strarray__socket_families
, /* family */ }, },
771 { .name
= "stat", .errmsg
= true, .alias
= "newstat", },
772 { .name
= "statfs", .errmsg
= true, },
773 { .name
= "swapoff", .errmsg
= true,
774 .arg_scnprintf
= { [0] = SCA_FILENAME
, /* specialfile */ }, },
775 { .name
= "swapon", .errmsg
= true,
776 .arg_scnprintf
= { [0] = SCA_FILENAME
, /* specialfile */ }, },
777 { .name
= "symlinkat", .errmsg
= true,
778 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
779 { .name
= "tgkill", .errmsg
= true,
780 .arg_scnprintf
= { [2] = SCA_SIGNUM
, /* sig */ }, },
781 { .name
= "tkill", .errmsg
= true,
782 .arg_scnprintf
= { [1] = SCA_SIGNUM
, /* sig */ }, },
783 { .name
= "truncate", .errmsg
= true, },
784 { .name
= "uname", .errmsg
= true, .alias
= "newuname", },
785 { .name
= "unlinkat", .errmsg
= true,
786 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dfd */ }, },
787 { .name
= "utime", .errmsg
= true, },
788 { .name
= "utimensat", .errmsg
= true,
789 .arg_scnprintf
= { [0] = SCA_FDAT
, /* dirfd */ }, },
790 { .name
= "utimes", .errmsg
= true, },
791 { .name
= "vmsplice", .errmsg
= true, },
792 { .name
= "wait4", .errpid
= true,
793 .arg_scnprintf
= { [2] = SCA_WAITID_OPTIONS
, /* options */ }, },
794 { .name
= "waitid", .errpid
= true,
795 .arg_scnprintf
= { [3] = SCA_WAITID_OPTIONS
, /* options */ }, },
796 { .name
= "write", .errmsg
= true, },
797 { .name
= "writev", .errmsg
= true, },
800 static int syscall_fmt__cmp(const void *name
, const void *fmtp
)
802 const struct syscall_fmt
*fmt
= fmtp
;
803 return strcmp(name
, fmt
->name
);
806 static struct syscall_fmt
*syscall_fmt__find(const char *name
)
808 const int nmemb
= ARRAY_SIZE(syscall_fmts
);
809 return bsearch(name
, syscall_fmts
, nmemb
, sizeof(struct syscall_fmt
), syscall_fmt__cmp
);
813 struct event_format
*tp_format
;
815 struct format_field
*args
;
818 struct syscall_fmt
*fmt
;
819 size_t (**arg_scnprintf
)(char *bf
, size_t size
, struct syscall_arg
*arg
);
823 static size_t fprintf_duration(unsigned long t
, FILE *fp
)
825 double duration
= (double)t
/ NSEC_PER_MSEC
;
826 size_t printed
= fprintf(fp
, "(");
829 printed
+= color_fprintf(fp
, PERF_COLOR_RED
, "%6.3f ms", duration
);
830 else if (duration
>= 0.01)
831 printed
+= color_fprintf(fp
, PERF_COLOR_YELLOW
, "%6.3f ms", duration
);
833 printed
+= color_fprintf(fp
, PERF_COLOR_NORMAL
, "%6.3f ms", duration
);
834 return printed
+ fprintf(fp
, "): ");
838 * filename.ptr: The filename char pointer that will be vfs_getname'd
839 * filename.entry_str_pos: Where to insert the string translated from
840 * filename.ptr by the vfs_getname tracepoint/kprobe.
842 struct thread_trace
{
845 unsigned long nr_events
;
846 unsigned long pfmaj
, pfmin
;
851 short int entry_str_pos
;
853 unsigned int namelen
;
861 struct intlist
*syscall_stats
;
864 static struct thread_trace
*thread_trace__new(void)
866 struct thread_trace
*ttrace
= zalloc(sizeof(struct thread_trace
));
869 ttrace
->paths
.max
= -1;
871 ttrace
->syscall_stats
= intlist__new(NULL
);
876 static struct thread_trace
*thread__trace(struct thread
*thread
, FILE *fp
)
878 struct thread_trace
*ttrace
;
883 if (thread__priv(thread
) == NULL
)
884 thread__set_priv(thread
, thread_trace__new());
886 if (thread__priv(thread
) == NULL
)
889 ttrace
= thread__priv(thread
);
894 color_fprintf(fp
, PERF_COLOR_RED
,
895 "WARNING: not enough memory, dropping samples!\n");
899 #define TRACE_PFMAJ (1 << 0)
900 #define TRACE_PFMIN (1 << 1)
902 static const size_t trace__entry_str_size
= 2048;
904 static int trace__set_fd_pathname(struct thread
*thread
, int fd
, const char *pathname
)
906 struct thread_trace
*ttrace
= thread__priv(thread
);
908 if (fd
> ttrace
->paths
.max
) {
909 char **npath
= realloc(ttrace
->paths
.table
, (fd
+ 1) * sizeof(char *));
914 if (ttrace
->paths
.max
!= -1) {
915 memset(npath
+ ttrace
->paths
.max
+ 1, 0,
916 (fd
- ttrace
->paths
.max
) * sizeof(char *));
918 memset(npath
, 0, (fd
+ 1) * sizeof(char *));
921 ttrace
->paths
.table
= npath
;
922 ttrace
->paths
.max
= fd
;
925 ttrace
->paths
.table
[fd
] = strdup(pathname
);
927 return ttrace
->paths
.table
[fd
] != NULL
? 0 : -1;
930 static int thread__read_fd_path(struct thread
*thread
, int fd
)
932 char linkname
[PATH_MAX
], pathname
[PATH_MAX
];
936 if (thread
->pid_
== thread
->tid
) {
937 scnprintf(linkname
, sizeof(linkname
),
938 "/proc/%d/fd/%d", thread
->pid_
, fd
);
940 scnprintf(linkname
, sizeof(linkname
),
941 "/proc/%d/task/%d/fd/%d", thread
->pid_
, thread
->tid
, fd
);
944 if (lstat(linkname
, &st
) < 0 || st
.st_size
+ 1 > (off_t
)sizeof(pathname
))
947 ret
= readlink(linkname
, pathname
, sizeof(pathname
));
949 if (ret
< 0 || ret
> st
.st_size
)
952 pathname
[ret
] = '\0';
953 return trace__set_fd_pathname(thread
, fd
, pathname
);
956 static const char *thread__fd_path(struct thread
*thread
, int fd
,
959 struct thread_trace
*ttrace
= thread__priv(thread
);
967 if ((fd
> ttrace
->paths
.max
|| ttrace
->paths
.table
[fd
] == NULL
)) {
970 ++trace
->stats
.proc_getname
;
971 if (thread__read_fd_path(thread
, fd
))
975 return ttrace
->paths
.table
[fd
];
978 static size_t syscall_arg__scnprintf_fd(char *bf
, size_t size
,
979 struct syscall_arg
*arg
)
982 size_t printed
= scnprintf(bf
, size
, "%d", fd
);
983 const char *path
= thread__fd_path(arg
->thread
, fd
, arg
->trace
);
986 printed
+= scnprintf(bf
+ printed
, size
- printed
, "<%s>", path
);
991 static size_t syscall_arg__scnprintf_close_fd(char *bf
, size_t size
,
992 struct syscall_arg
*arg
)
995 size_t printed
= syscall_arg__scnprintf_fd(bf
, size
, arg
);
996 struct thread_trace
*ttrace
= thread__priv(arg
->thread
);
998 if (ttrace
&& fd
>= 0 && fd
<= ttrace
->paths
.max
)
999 zfree(&ttrace
->paths
.table
[fd
]);
1004 static void thread__set_filename_pos(struct thread
*thread
, const char *bf
,
1007 struct thread_trace
*ttrace
= thread__priv(thread
);
1009 ttrace
->filename
.ptr
= ptr
;
1010 ttrace
->filename
.entry_str_pos
= bf
- ttrace
->entry_str
;
1013 static size_t syscall_arg__scnprintf_filename(char *bf
, size_t size
,
1014 struct syscall_arg
*arg
)
1016 unsigned long ptr
= arg
->val
;
1018 if (!arg
->trace
->vfs_getname
)
1019 return scnprintf(bf
, size
, "%#x", ptr
);
1021 thread__set_filename_pos(arg
->thread
, bf
, ptr
);
1025 static bool trace__filter_duration(struct trace
*trace
, double t
)
1027 return t
< (trace
->duration_filter
* NSEC_PER_MSEC
);
1030 static size_t trace__fprintf_tstamp(struct trace
*trace
, u64 tstamp
, FILE *fp
)
1032 double ts
= (double)(tstamp
- trace
->base_time
) / NSEC_PER_MSEC
;
1034 return fprintf(fp
, "%10.3f ", ts
);
/* Main-loop termination flags, flipped from the signal handler below. */
static bool done	= false;
static bool interrupted = false;

/*
 * Signal handler: always request shutdown; additionally note whether the
 * cause was SIGINT so the exit path can distinguish ^C from other signals.
 * NOTE(review): the "done = true;" line was reconstructed — the extraction
 * dropped it.
 */
static void sig_handler(int sig)
{
	done = true;
	interrupted = (sig == SIGINT);
}
1046 static size_t trace__fprintf_entry_head(struct trace
*trace
, struct thread
*thread
,
1047 u64 duration
, u64 tstamp
, FILE *fp
)
1049 size_t printed
= trace__fprintf_tstamp(trace
, tstamp
, fp
);
1050 printed
+= fprintf_duration(duration
, fp
);
1052 if (trace
->multiple_threads
) {
1053 if (trace
->show_comm
)
1054 printed
+= fprintf(fp
, "%.14s/", thread__comm_str(thread
));
1055 printed
+= fprintf(fp
, "%d ", thread
->tid
);
1061 static int trace__process_event(struct trace
*trace
, struct machine
*machine
,
1062 union perf_event
*event
, struct perf_sample
*sample
)
1066 switch (event
->header
.type
) {
1067 case PERF_RECORD_LOST
:
1068 color_fprintf(trace
->output
, PERF_COLOR_RED
,
1069 "LOST %" PRIu64
" events!\n", event
->lost
.lost
);
1070 ret
= machine__process_lost_event(machine
, event
, sample
);
1073 ret
= machine__process_event(machine
, event
, sample
);
1080 static int trace__tool_process(struct perf_tool
*tool
,
1081 union perf_event
*event
,
1082 struct perf_sample
*sample
,
1083 struct machine
*machine
)
1085 struct trace
*trace
= container_of(tool
, struct trace
, tool
);
1086 return trace__process_event(trace
, machine
, event
, sample
);
1089 static char *trace__machine__resolve_kernel_addr(void *vmachine
, unsigned long long *addrp
, char **modp
)
1091 struct machine
*machine
= vmachine
;
1093 if (machine
->kptr_restrict_warned
)
1096 if (symbol_conf
.kptr_restrict
) {
1097 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1098 "Check /proc/sys/kernel/kptr_restrict.\n\n"
1099 "Kernel samples will not be resolved.\n");
1100 machine
->kptr_restrict_warned
= true;
1104 return machine__resolve_kernel_addr(vmachine
, addrp
, modp
);
1107 static int trace__symbols_init(struct trace
*trace
, struct perf_evlist
*evlist
)
1109 int err
= symbol__init(NULL
);
1114 trace
->host
= machine__new_host();
1115 if (trace
->host
== NULL
)
1118 if (trace_event__register_resolver(trace
->host
, trace__machine__resolve_kernel_addr
) < 0)
1121 err
= __machine__synthesize_threads(trace
->host
, &trace
->tool
, &trace
->opts
.target
,
1122 evlist
->threads
, trace__tool_process
, false,
1123 trace
->opts
.proc_map_timeout
);
1130 static int syscall__set_arg_fmts(struct syscall
*sc
)
1132 struct format_field
*field
;
1135 sc
->arg_scnprintf
= calloc(sc
->nr_args
, sizeof(void *));
1136 if (sc
->arg_scnprintf
== NULL
)
1140 sc
->arg_parm
= sc
->fmt
->arg_parm
;
1142 for (field
= sc
->args
; field
; field
= field
->next
) {
1143 if (sc
->fmt
&& sc
->fmt
->arg_scnprintf
[idx
])
1144 sc
->arg_scnprintf
[idx
] = sc
->fmt
->arg_scnprintf
[idx
];
1145 else if (strcmp(field
->type
, "const char *") == 0 &&
1146 (strcmp(field
->name
, "filename") == 0 ||
1147 strcmp(field
->name
, "path") == 0 ||
1148 strcmp(field
->name
, "pathname") == 0))
1149 sc
->arg_scnprintf
[idx
] = SCA_FILENAME
;
1150 else if (field
->flags
& FIELD_IS_POINTER
)
1151 sc
->arg_scnprintf
[idx
] = syscall_arg__scnprintf_hex
;
1152 else if (strcmp(field
->type
, "pid_t") == 0)
1153 sc
->arg_scnprintf
[idx
] = SCA_PID
;
1154 else if (strcmp(field
->type
, "umode_t") == 0)
1155 sc
->arg_scnprintf
[idx
] = SCA_MODE_T
;
1156 else if ((strcmp(field
->type
, "int") == 0 ||
1157 strcmp(field
->type
, "unsigned int") == 0 ||
1158 strcmp(field
->type
, "long") == 0) &&
1159 (len
= strlen(field
->name
)) >= 2 &&
1160 strcmp(field
->name
+ len
- 2, "fd") == 0) {
1162 * /sys/kernel/tracing/events/syscalls/sys_enter*
1163 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1168 sc
->arg_scnprintf
[idx
] = SCA_FD
;
1176 static int trace__read_syscall_info(struct trace
*trace
, int id
)
1180 const char *name
= syscalltbl__name(trace
->sctbl
, id
);
1185 if (id
> trace
->syscalls
.max
) {
1186 struct syscall
*nsyscalls
= realloc(trace
->syscalls
.table
, (id
+ 1) * sizeof(*sc
));
1188 if (nsyscalls
== NULL
)
1191 if (trace
->syscalls
.max
!= -1) {
1192 memset(nsyscalls
+ trace
->syscalls
.max
+ 1, 0,
1193 (id
- trace
->syscalls
.max
) * sizeof(*sc
));
1195 memset(nsyscalls
, 0, (id
+ 1) * sizeof(*sc
));
1198 trace
->syscalls
.table
= nsyscalls
;
1199 trace
->syscalls
.max
= id
;
1202 sc
= trace
->syscalls
.table
+ id
;
1205 sc
->fmt
= syscall_fmt__find(sc
->name
);
1207 snprintf(tp_name
, sizeof(tp_name
), "sys_enter_%s", sc
->name
);
1208 sc
->tp_format
= trace_event__tp_format("syscalls", tp_name
);
1210 if (IS_ERR(sc
->tp_format
) && sc
->fmt
&& sc
->fmt
->alias
) {
1211 snprintf(tp_name
, sizeof(tp_name
), "sys_enter_%s", sc
->fmt
->alias
);
1212 sc
->tp_format
= trace_event__tp_format("syscalls", tp_name
);
1215 if (IS_ERR(sc
->tp_format
))
1218 sc
->args
= sc
->tp_format
->format
.fields
;
1219 sc
->nr_args
= sc
->tp_format
->format
.nr_fields
;
1221 * We need to check and discard the first variable '__syscall_nr'
1222 * or 'nr' that mean the syscall number. It is needless here.
1223 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
1225 if (sc
->args
&& (!strcmp(sc
->args
->name
, "__syscall_nr") || !strcmp(sc
->args
->name
, "nr"))) {
1226 sc
->args
= sc
->args
->next
;
1230 sc
->is_exit
= !strcmp(name
, "exit_group") || !strcmp(name
, "exit");
1232 return syscall__set_arg_fmts(sc
);
1235 static int trace__validate_ev_qualifier(struct trace
*trace
)
1238 struct str_node
*pos
;
1240 trace
->ev_qualifier_ids
.nr
= strlist__nr_entries(trace
->ev_qualifier
);
1241 trace
->ev_qualifier_ids
.entries
= malloc(trace
->ev_qualifier_ids
.nr
*
1242 sizeof(trace
->ev_qualifier_ids
.entries
[0]));
1244 if (trace
->ev_qualifier_ids
.entries
== NULL
) {
1245 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1253 strlist__for_each_entry(pos
, trace
->ev_qualifier
) {
1254 const char *sc
= pos
->s
;
1255 int id
= syscalltbl__id(trace
->sctbl
, sc
);
1259 fputs("Error:\tInvalid syscall ", trace
->output
);
1262 fputs(", ", trace
->output
);
1265 fputs(sc
, trace
->output
);
1268 trace
->ev_qualifier_ids
.entries
[i
++] = id
;
1272 fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1273 "\nHint:\tand: 'man syscalls'\n", trace
->output
);
1274 zfree(&trace
->ev_qualifier_ids
.entries
);
1275 trace
->ev_qualifier_ids
.nr
= 0;
1282 * args is to be interpreted as a series of longs but we need to handle
1283 * 8-byte unaligned accesses. args points to raw_data within the event
1284 * and raw_data is guaranteed to be 8-byte unaligned because it is
1285 * preceded by raw_size which is a u32. So we need to copy args to a temp
1286 * variable to read it. Most notably this avoids extended load instructions
1287 * on unaligned addresses
1290 static size_t syscall__scnprintf_args(struct syscall
*sc
, char *bf
, size_t size
,
1291 unsigned char *args
, struct trace
*trace
,
1292 struct thread
*thread
)
1298 if (sc
->args
!= NULL
) {
1299 struct format_field
*field
;
1301 struct syscall_arg arg
= {
1308 for (field
= sc
->args
; field
;
1309 field
= field
->next
, ++arg
.idx
, bit
<<= 1) {
1313 /* special care for unaligned accesses */
1314 p
= args
+ sizeof(unsigned long) * arg
.idx
;
1315 memcpy(&val
, p
, sizeof(val
));
1318 * Suppress this argument if its value is zero and
1319 * and we don't have a string associated in an
1323 !(sc
->arg_scnprintf
&&
1324 sc
->arg_scnprintf
[arg
.idx
] == SCA_STRARRAY
&&
1325 sc
->arg_parm
[arg
.idx
]))
1328 printed
+= scnprintf(bf
+ printed
, size
- printed
,
1329 "%s%s: ", printed
? ", " : "", field
->name
);
1330 if (sc
->arg_scnprintf
&& sc
->arg_scnprintf
[arg
.idx
]) {
1333 arg
.parm
= sc
->arg_parm
[arg
.idx
];
1334 printed
+= sc
->arg_scnprintf
[arg
.idx
](bf
+ printed
,
1335 size
- printed
, &arg
);
1337 printed
+= scnprintf(bf
+ printed
, size
- printed
,
1341 } else if (IS_ERR(sc
->tp_format
)) {
1343 * If we managed to read the tracepoint /format file, then we
1344 * may end up not having any args, like with gettid(), so only
1345 * print the raw args when we didn't manage to read it.
1350 /* special care for unaligned accesses */
1351 p
= args
+ sizeof(unsigned long) * i
;
1352 memcpy(&val
, p
, sizeof(val
));
1353 printed
+= scnprintf(bf
+ printed
, size
- printed
,
1355 printed
? ", " : "", i
, val
);
1363 typedef int (*tracepoint_handler
)(struct trace
*trace
, struct perf_evsel
*evsel
,
1364 union perf_event
*event
,
1365 struct perf_sample
*sample
);
1367 static struct syscall
*trace__syscall_info(struct trace
*trace
,
1368 struct perf_evsel
*evsel
, int id
)
1374 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1375 * before that, leaving at a higher verbosity level till that is
1376 * explained. Reproduced with plain ftrace with:
1378 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1379 * grep "NR -1 " /t/trace_pipe
1381 * After generating some load on the machine.
1385 fprintf(trace
->output
, "Invalid syscall %d id, skipping (%s, %" PRIu64
") ...\n",
1386 id
, perf_evsel__name(evsel
), ++n
);
1391 if ((id
> trace
->syscalls
.max
|| trace
->syscalls
.table
[id
].name
== NULL
) &&
1392 trace__read_syscall_info(trace
, id
))
1395 if ((id
> trace
->syscalls
.max
|| trace
->syscalls
.table
[id
].name
== NULL
))
1398 return &trace
->syscalls
.table
[id
];
1402 fprintf(trace
->output
, "Problems reading syscall %d", id
);
1403 if (id
<= trace
->syscalls
.max
&& trace
->syscalls
.table
[id
].name
!= NULL
)
1404 fprintf(trace
->output
, "(%s)", trace
->syscalls
.table
[id
].name
);
1405 fputs(" information\n", trace
->output
);
1410 static void thread__update_stats(struct thread_trace
*ttrace
,
1411 int id
, struct perf_sample
*sample
)
1413 struct int_node
*inode
;
1414 struct stats
*stats
;
1417 inode
= intlist__findnew(ttrace
->syscall_stats
, id
);
1421 stats
= inode
->priv
;
1422 if (stats
== NULL
) {
1423 stats
= malloc(sizeof(struct stats
));
1427 inode
->priv
= stats
;
1430 if (ttrace
->entry_time
&& sample
->time
> ttrace
->entry_time
)
1431 duration
= sample
->time
- ttrace
->entry_time
;
1433 update_stats(stats
, duration
);
1436 static int trace__printf_interrupted_entry(struct trace
*trace
, struct perf_sample
*sample
)
1438 struct thread_trace
*ttrace
;
1442 if (trace
->current
== NULL
)
1445 ttrace
= thread__priv(trace
->current
);
1447 if (!ttrace
->entry_pending
)
1450 duration
= sample
->time
- ttrace
->entry_time
;
1452 printed
= trace__fprintf_entry_head(trace
, trace
->current
, duration
, ttrace
->entry_time
, trace
->output
);
1453 printed
+= fprintf(trace
->output
, "%-70s) ...\n", ttrace
->entry_str
);
1454 ttrace
->entry_pending
= false;
1459 static int trace__sys_enter(struct trace
*trace
, struct perf_evsel
*evsel
,
1460 union perf_event
*event __maybe_unused
,
1461 struct perf_sample
*sample
)
1466 struct thread
*thread
;
1467 int id
= perf_evsel__sc_tp_uint(evsel
, id
, sample
), err
= -1;
1468 struct syscall
*sc
= trace__syscall_info(trace
, evsel
, id
);
1469 struct thread_trace
*ttrace
;
1474 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
1475 ttrace
= thread__trace(thread
, trace
->output
);
1479 args
= perf_evsel__sc_tp_ptr(evsel
, args
, sample
);
1481 if (ttrace
->entry_str
== NULL
) {
1482 ttrace
->entry_str
= malloc(trace__entry_str_size
);
1483 if (!ttrace
->entry_str
)
1487 if (!(trace
->duration_filter
|| trace
->summary_only
|| trace
->min_stack
))
1488 trace__printf_interrupted_entry(trace
, sample
);
1490 ttrace
->entry_time
= sample
->time
;
1491 msg
= ttrace
->entry_str
;
1492 printed
+= scnprintf(msg
+ printed
, trace__entry_str_size
- printed
, "%s(", sc
->name
);
1494 printed
+= syscall__scnprintf_args(sc
, msg
+ printed
, trace__entry_str_size
- printed
,
1495 args
, trace
, thread
);
1498 if (!(trace
->duration_filter
|| trace
->summary_only
|| trace
->min_stack
)) {
1499 trace__fprintf_entry_head(trace
, thread
, 1, ttrace
->entry_time
, trace
->output
);
1500 fprintf(trace
->output
, "%-70s)\n", ttrace
->entry_str
);
1503 ttrace
->entry_pending
= true;
1504 /* See trace__vfs_getname & trace__sys_exit */
1505 ttrace
->filename
.pending_open
= false;
1508 if (trace
->current
!= thread
) {
1509 thread__put(trace
->current
);
1510 trace
->current
= thread__get(thread
);
1514 thread__put(thread
);
1518 static int trace__resolve_callchain(struct trace
*trace
, struct perf_evsel
*evsel
,
1519 struct perf_sample
*sample
,
1520 struct callchain_cursor
*cursor
)
1522 struct addr_location al
;
1524 if (machine__resolve(trace
->host
, &al
, sample
) < 0 ||
1525 thread__resolve_callchain(al
.thread
, cursor
, evsel
, sample
, NULL
, NULL
, trace
->max_stack
))
1531 static int trace__fprintf_callchain(struct trace
*trace
, struct perf_sample
*sample
)
1533 /* TODO: user-configurable print_opts */
1534 const unsigned int print_opts
= EVSEL__PRINT_SYM
|
1536 EVSEL__PRINT_UNKNOWN_AS_ADDR
;
1538 return sample__fprintf_callchain(sample
, 38, print_opts
, &callchain_cursor
, trace
->output
);
1541 static int trace__sys_exit(struct trace
*trace
, struct perf_evsel
*evsel
,
1542 union perf_event
*event __maybe_unused
,
1543 struct perf_sample
*sample
)
1547 struct thread
*thread
;
1548 int id
= perf_evsel__sc_tp_uint(evsel
, id
, sample
), err
= -1, callchain_ret
= 0;
1549 struct syscall
*sc
= trace__syscall_info(trace
, evsel
, id
);
1550 struct thread_trace
*ttrace
;
1555 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
1556 ttrace
= thread__trace(thread
, trace
->output
);
1561 thread__update_stats(ttrace
, id
, sample
);
1563 ret
= perf_evsel__sc_tp_uint(evsel
, ret
, sample
);
1565 if (id
== trace
->open_id
&& ret
>= 0 && ttrace
->filename
.pending_open
) {
1566 trace__set_fd_pathname(thread
, ret
, ttrace
->filename
.name
);
1567 ttrace
->filename
.pending_open
= false;
1568 ++trace
->stats
.vfs_getname
;
1571 if (ttrace
->entry_time
) {
1572 duration
= sample
->time
- ttrace
->entry_time
;
1573 if (trace__filter_duration(trace
, duration
))
1575 } else if (trace
->duration_filter
)
1578 if (sample
->callchain
) {
1579 callchain_ret
= trace__resolve_callchain(trace
, evsel
, sample
, &callchain_cursor
);
1580 if (callchain_ret
== 0) {
1581 if (callchain_cursor
.nr
< trace
->min_stack
)
1587 if (trace
->summary_only
)
1590 trace__fprintf_entry_head(trace
, thread
, duration
, ttrace
->entry_time
, trace
->output
);
1592 if (ttrace
->entry_pending
) {
1593 fprintf(trace
->output
, "%-70s", ttrace
->entry_str
);
1595 fprintf(trace
->output
, " ... [");
1596 color_fprintf(trace
->output
, PERF_COLOR_YELLOW
, "continued");
1597 fprintf(trace
->output
, "]: %s()", sc
->name
);
1600 if (sc
->fmt
== NULL
) {
1602 fprintf(trace
->output
, ") = %ld", ret
);
1603 } else if (ret
< 0 && (sc
->fmt
->errmsg
|| sc
->fmt
->errpid
)) {
1604 char bf
[STRERR_BUFSIZE
];
1605 const char *emsg
= str_error_r(-ret
, bf
, sizeof(bf
)),
1606 *e
= audit_errno_to_name(-ret
);
1608 fprintf(trace
->output
, ") = -1 %s %s", e
, emsg
);
1609 } else if (ret
== 0 && sc
->fmt
->timeout
)
1610 fprintf(trace
->output
, ") = 0 Timeout");
1611 else if (sc
->fmt
->hexret
)
1612 fprintf(trace
->output
, ") = %#lx", ret
);
1613 else if (sc
->fmt
->errpid
) {
1614 struct thread
*child
= machine__find_thread(trace
->host
, ret
, ret
);
1616 if (child
!= NULL
) {
1617 fprintf(trace
->output
, ") = %ld", ret
);
1618 if (child
->comm_set
)
1619 fprintf(trace
->output
, " (%s)", thread__comm_str(child
));
1625 fputc('\n', trace
->output
);
1627 if (callchain_ret
> 0)
1628 trace__fprintf_callchain(trace
, sample
);
1629 else if (callchain_ret
< 0)
1630 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel
));
1632 ttrace
->entry_pending
= false;
1635 thread__put(thread
);
1639 static int trace__vfs_getname(struct trace
*trace
, struct perf_evsel
*evsel
,
1640 union perf_event
*event __maybe_unused
,
1641 struct perf_sample
*sample
)
1643 struct thread
*thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
1644 struct thread_trace
*ttrace
;
1645 size_t filename_len
, entry_str_len
, to_move
;
1646 ssize_t remaining_space
;
1648 const char *filename
= perf_evsel__rawptr(evsel
, sample
, "pathname");
1653 ttrace
= thread__priv(thread
);
1657 filename_len
= strlen(filename
);
1659 if (ttrace
->filename
.namelen
< filename_len
) {
1660 char *f
= realloc(ttrace
->filename
.name
, filename_len
+ 1);
1665 ttrace
->filename
.namelen
= filename_len
;
1666 ttrace
->filename
.name
= f
;
1669 strcpy(ttrace
->filename
.name
, filename
);
1670 ttrace
->filename
.pending_open
= true;
1672 if (!ttrace
->filename
.ptr
)
1675 entry_str_len
= strlen(ttrace
->entry_str
);
1676 remaining_space
= trace__entry_str_size
- entry_str_len
- 1; /* \0 */
1677 if (remaining_space
<= 0)
1680 if (filename_len
> (size_t)remaining_space
) {
1681 filename
+= filename_len
- remaining_space
;
1682 filename_len
= remaining_space
;
1685 to_move
= entry_str_len
- ttrace
->filename
.entry_str_pos
+ 1; /* \0 */
1686 pos
= ttrace
->entry_str
+ ttrace
->filename
.entry_str_pos
;
1687 memmove(pos
+ filename_len
, pos
, to_move
);
1688 memcpy(pos
, filename
, filename_len
);
1690 ttrace
->filename
.ptr
= 0;
1691 ttrace
->filename
.entry_str_pos
= 0;
1696 static int trace__sched_stat_runtime(struct trace
*trace
, struct perf_evsel
*evsel
,
1697 union perf_event
*event __maybe_unused
,
1698 struct perf_sample
*sample
)
1700 u64 runtime
= perf_evsel__intval(evsel
, sample
, "runtime");
1701 double runtime_ms
= (double)runtime
/ NSEC_PER_MSEC
;
1702 struct thread
*thread
= machine__findnew_thread(trace
->host
,
1705 struct thread_trace
*ttrace
= thread__trace(thread
, trace
->output
);
1710 ttrace
->runtime_ms
+= runtime_ms
;
1711 trace
->runtime_ms
+= runtime_ms
;
1712 thread__put(thread
);
1716 fprintf(trace
->output
, "%s: comm=%s,pid=%u,runtime=%" PRIu64
",vruntime=%" PRIu64
")\n",
1718 perf_evsel__strval(evsel
, sample
, "comm"),
1719 (pid_t
)perf_evsel__intval(evsel
, sample
, "pid"),
1721 perf_evsel__intval(evsel
, sample
, "vruntime"));
1722 thread__put(thread
);
1726 static void bpf_output__printer(enum binary_printer_ops op
,
1727 unsigned int val
, void *extra
)
1729 FILE *output
= extra
;
1730 unsigned char ch
= (unsigned char)val
;
1733 case BINARY_PRINT_CHAR_DATA
:
1734 fprintf(output
, "%c", isprint(ch
) ? ch
: '.');
1736 case BINARY_PRINT_DATA_BEGIN
:
1737 case BINARY_PRINT_LINE_BEGIN
:
1738 case BINARY_PRINT_ADDR
:
1739 case BINARY_PRINT_NUM_DATA
:
1740 case BINARY_PRINT_NUM_PAD
:
1741 case BINARY_PRINT_SEP
:
1742 case BINARY_PRINT_CHAR_PAD
:
1743 case BINARY_PRINT_LINE_END
:
1744 case BINARY_PRINT_DATA_END
:
1750 static void bpf_output__fprintf(struct trace
*trace
,
1751 struct perf_sample
*sample
)
1753 print_binary(sample
->raw_data
, sample
->raw_size
, 8,
1754 bpf_output__printer
, trace
->output
);
1757 static int trace__event_handler(struct trace
*trace
, struct perf_evsel
*evsel
,
1758 union perf_event
*event __maybe_unused
,
1759 struct perf_sample
*sample
)
1761 int callchain_ret
= 0;
1763 if (sample
->callchain
) {
1764 callchain_ret
= trace__resolve_callchain(trace
, evsel
, sample
, &callchain_cursor
);
1765 if (callchain_ret
== 0) {
1766 if (callchain_cursor
.nr
< trace
->min_stack
)
1772 trace__printf_interrupted_entry(trace
, sample
);
1773 trace__fprintf_tstamp(trace
, sample
->time
, trace
->output
);
1775 if (trace
->trace_syscalls
)
1776 fprintf(trace
->output
, "( ): ");
1778 fprintf(trace
->output
, "%s:", evsel
->name
);
1780 if (perf_evsel__is_bpf_output(evsel
)) {
1781 bpf_output__fprintf(trace
, sample
);
1782 } else if (evsel
->tp_format
) {
1783 event_format__fprintf(evsel
->tp_format
, sample
->cpu
,
1784 sample
->raw_data
, sample
->raw_size
,
1788 fprintf(trace
->output
, ")\n");
1790 if (callchain_ret
> 0)
1791 trace__fprintf_callchain(trace
, sample
);
1792 else if (callchain_ret
< 0)
1793 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel
));
1798 static void print_location(FILE *f
, struct perf_sample
*sample
,
1799 struct addr_location
*al
,
1800 bool print_dso
, bool print_sym
)
1803 if ((verbose
|| print_dso
) && al
->map
)
1804 fprintf(f
, "%s@", al
->map
->dso
->long_name
);
1806 if ((verbose
|| print_sym
) && al
->sym
)
1807 fprintf(f
, "%s+0x%" PRIx64
, al
->sym
->name
,
1808 al
->addr
- al
->sym
->start
);
1810 fprintf(f
, "0x%" PRIx64
, al
->addr
);
1812 fprintf(f
, "0x%" PRIx64
, sample
->addr
);
1815 static int trace__pgfault(struct trace
*trace
,
1816 struct perf_evsel
*evsel
,
1817 union perf_event
*event __maybe_unused
,
1818 struct perf_sample
*sample
)
1820 struct thread
*thread
;
1821 struct addr_location al
;
1822 char map_type
= 'd';
1823 struct thread_trace
*ttrace
;
1825 int callchain_ret
= 0;
1827 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
1829 if (sample
->callchain
) {
1830 callchain_ret
= trace__resolve_callchain(trace
, evsel
, sample
, &callchain_cursor
);
1831 if (callchain_ret
== 0) {
1832 if (callchain_cursor
.nr
< trace
->min_stack
)
1838 ttrace
= thread__trace(thread
, trace
->output
);
1842 if (evsel
->attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
)
1847 if (trace
->summary_only
)
1850 thread__find_addr_location(thread
, sample
->cpumode
, MAP__FUNCTION
,
1853 trace__fprintf_entry_head(trace
, thread
, 0, sample
->time
, trace
->output
);
1855 fprintf(trace
->output
, "%sfault [",
1856 evsel
->attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
?
1859 print_location(trace
->output
, sample
, &al
, false, true);
1861 fprintf(trace
->output
, "] => ");
1863 thread__find_addr_location(thread
, sample
->cpumode
, MAP__VARIABLE
,
1867 thread__find_addr_location(thread
, sample
->cpumode
,
1868 MAP__FUNCTION
, sample
->addr
, &al
);
1876 print_location(trace
->output
, sample
, &al
, true, false);
1878 fprintf(trace
->output
, " (%c%c)\n", map_type
, al
.level
);
1880 if (callchain_ret
> 0)
1881 trace__fprintf_callchain(trace
, sample
);
1882 else if (callchain_ret
< 0)
1883 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel
));
1887 thread__put(thread
);
1891 static void trace__set_base_time(struct trace
*trace
,
1892 struct perf_evsel
*evsel
,
1893 struct perf_sample
*sample
)
1896 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
1897 * and don't use sample->time unconditionally, we may end up having
1898 * some other event in the future without PERF_SAMPLE_TIME for good
1899 * reason, i.e. we may not be interested in its timestamps, just in
1900 * it taking place, picking some piece of information when it
1901 * appears in our event stream (vfs_getname comes to mind).
1903 if (trace
->base_time
== 0 && !trace
->full_time
&&
1904 (evsel
->attr
.sample_type
& PERF_SAMPLE_TIME
))
1905 trace
->base_time
= sample
->time
;
1908 static int trace__process_sample(struct perf_tool
*tool
,
1909 union perf_event
*event
,
1910 struct perf_sample
*sample
,
1911 struct perf_evsel
*evsel
,
1912 struct machine
*machine __maybe_unused
)
1914 struct trace
*trace
= container_of(tool
, struct trace
, tool
);
1915 struct thread
*thread
;
1918 tracepoint_handler handler
= evsel
->handler
;
1920 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
1921 if (thread
&& thread__is_filtered(thread
))
1924 trace__set_base_time(trace
, evsel
, sample
);
1928 handler(trace
, evsel
, event
, sample
);
1934 static int trace__record(struct trace
*trace
, int argc
, const char **argv
)
1936 unsigned int rec_argc
, i
, j
;
1937 const char **rec_argv
;
1938 const char * const record_args
[] = {
1945 const char * const sc_args
[] = { "-e", };
1946 unsigned int sc_args_nr
= ARRAY_SIZE(sc_args
);
1947 const char * const majpf_args
[] = { "-e", "major-faults" };
1948 unsigned int majpf_args_nr
= ARRAY_SIZE(majpf_args
);
1949 const char * const minpf_args
[] = { "-e", "minor-faults" };
1950 unsigned int minpf_args_nr
= ARRAY_SIZE(minpf_args
);
1952 /* +1 is for the event string below */
1953 rec_argc
= ARRAY_SIZE(record_args
) + sc_args_nr
+ 1 +
1954 majpf_args_nr
+ minpf_args_nr
+ argc
;
1955 rec_argv
= calloc(rec_argc
+ 1, sizeof(char *));
1957 if (rec_argv
== NULL
)
1961 for (i
= 0; i
< ARRAY_SIZE(record_args
); i
++)
1962 rec_argv
[j
++] = record_args
[i
];
1964 if (trace
->trace_syscalls
) {
1965 for (i
= 0; i
< sc_args_nr
; i
++)
1966 rec_argv
[j
++] = sc_args
[i
];
1968 /* event string may be different for older kernels - e.g., RHEL6 */
1969 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
1970 rec_argv
[j
++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
1971 else if (is_valid_tracepoint("syscalls:sys_enter"))
1972 rec_argv
[j
++] = "syscalls:sys_enter,syscalls:sys_exit";
1974 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
1979 if (trace
->trace_pgfaults
& TRACE_PFMAJ
)
1980 for (i
= 0; i
< majpf_args_nr
; i
++)
1981 rec_argv
[j
++] = majpf_args
[i
];
1983 if (trace
->trace_pgfaults
& TRACE_PFMIN
)
1984 for (i
= 0; i
< minpf_args_nr
; i
++)
1985 rec_argv
[j
++] = minpf_args
[i
];
1987 for (i
= 0; i
< (unsigned int)argc
; i
++)
1988 rec_argv
[j
++] = argv
[i
];
1990 return cmd_record(j
, rec_argv
, NULL
);
1993 static size_t trace__fprintf_thread_summary(struct trace
*trace
, FILE *fp
);
1995 static bool perf_evlist__add_vfs_getname(struct perf_evlist
*evlist
)
1997 struct perf_evsel
*evsel
= perf_evsel__newtp("probe", "vfs_getname");
2002 if (perf_evsel__field(evsel
, "pathname") == NULL
) {
2003 perf_evsel__delete(evsel
);
2007 evsel
->handler
= trace__vfs_getname
;
2008 perf_evlist__add(evlist
, evsel
);
2012 static struct perf_evsel
*perf_evsel__new_pgfault(u64 config
)
2014 struct perf_evsel
*evsel
;
2015 struct perf_event_attr attr
= {
2016 .type
= PERF_TYPE_SOFTWARE
,
2020 attr
.config
= config
;
2021 attr
.sample_period
= 1;
2023 event_attr_init(&attr
);
2025 evsel
= perf_evsel__new(&attr
);
2027 evsel
->handler
= trace__pgfault
;
2032 static void trace__handle_event(struct trace
*trace
, union perf_event
*event
, struct perf_sample
*sample
)
2034 const u32 type
= event
->header
.type
;
2035 struct perf_evsel
*evsel
;
2037 if (type
!= PERF_RECORD_SAMPLE
) {
2038 trace__process_event(trace
, trace
->host
, event
, sample
);
2042 evsel
= perf_evlist__id2evsel(trace
->evlist
, sample
->id
);
2043 if (evsel
== NULL
) {
2044 fprintf(trace
->output
, "Unknown tp ID %" PRIu64
", skipping...\n", sample
->id
);
2048 trace__set_base_time(trace
, evsel
, sample
);
2050 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
&&
2051 sample
->raw_data
== NULL
) {
2052 fprintf(trace
->output
, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2053 perf_evsel__name(evsel
), sample
->tid
,
2054 sample
->cpu
, sample
->raw_size
);
2056 tracepoint_handler handler
= evsel
->handler
;
2057 handler(trace
, evsel
, event
, sample
);
2061 static int trace__add_syscall_newtp(struct trace
*trace
)
2064 struct perf_evlist
*evlist
= trace
->evlist
;
2065 struct perf_evsel
*sys_enter
, *sys_exit
;
2067 sys_enter
= perf_evsel__syscall_newtp("sys_enter", trace__sys_enter
);
2068 if (sys_enter
== NULL
)
2071 if (perf_evsel__init_sc_tp_ptr_field(sys_enter
, args
))
2072 goto out_delete_sys_enter
;
2074 sys_exit
= perf_evsel__syscall_newtp("sys_exit", trace__sys_exit
);
2075 if (sys_exit
== NULL
)
2076 goto out_delete_sys_enter
;
2078 if (perf_evsel__init_sc_tp_uint_field(sys_exit
, ret
))
2079 goto out_delete_sys_exit
;
2081 perf_evlist__add(evlist
, sys_enter
);
2082 perf_evlist__add(evlist
, sys_exit
);
2084 if (callchain_param
.enabled
&& !trace
->kernel_syscallchains
) {
2086 * We're interested only in the user space callchain
2087 * leading to the syscall, allow overriding that for
2088 * debugging reasons using --kernel_syscall_callchains
2090 sys_exit
->attr
.exclude_callchain_kernel
= 1;
2093 trace
->syscalls
.events
.sys_enter
= sys_enter
;
2094 trace
->syscalls
.events
.sys_exit
= sys_exit
;
2100 out_delete_sys_exit
:
2101 perf_evsel__delete_priv(sys_exit
);
2102 out_delete_sys_enter
:
2103 perf_evsel__delete_priv(sys_enter
);
2107 static int trace__set_ev_qualifier_filter(struct trace
*trace
)
2110 struct perf_evsel
*sys_exit
;
2111 char *filter
= asprintf_expr_inout_ints("id", !trace
->not_ev_qualifier
,
2112 trace
->ev_qualifier_ids
.nr
,
2113 trace
->ev_qualifier_ids
.entries
);
2118 if (!perf_evsel__append_tp_filter(trace
->syscalls
.events
.sys_enter
,
2120 sys_exit
= trace
->syscalls
.events
.sys_exit
;
2121 err
= perf_evsel__append_tp_filter(sys_exit
, filter
);
2132 static int trace__run(struct trace
*trace
, int argc
, const char **argv
)
2134 struct perf_evlist
*evlist
= trace
->evlist
;
2135 struct perf_evsel
*evsel
, *pgfault_maj
= NULL
, *pgfault_min
= NULL
;
2137 unsigned long before
;
2138 const bool forks
= argc
> 0;
2139 bool draining
= false;
2143 if (trace
->trace_syscalls
&& trace__add_syscall_newtp(trace
))
2144 goto out_error_raw_syscalls
;
2146 if (trace
->trace_syscalls
)
2147 trace
->vfs_getname
= perf_evlist__add_vfs_getname(evlist
);
2149 if ((trace
->trace_pgfaults
& TRACE_PFMAJ
)) {
2150 pgfault_maj
= perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ
);
2151 if (pgfault_maj
== NULL
)
2153 perf_evlist__add(evlist
, pgfault_maj
);
2156 if ((trace
->trace_pgfaults
& TRACE_PFMIN
)) {
2157 pgfault_min
= perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN
);
2158 if (pgfault_min
== NULL
)
2160 perf_evlist__add(evlist
, pgfault_min
);
2164 perf_evlist__add_newtp(evlist
, "sched", "sched_stat_runtime",
2165 trace__sched_stat_runtime
))
2166 goto out_error_sched_stat_runtime
;
2168 err
= perf_evlist__create_maps(evlist
, &trace
->opts
.target
);
2170 fprintf(trace
->output
, "Problems parsing the target to trace, check your options!\n");
2171 goto out_delete_evlist
;
2174 err
= trace__symbols_init(trace
, evlist
);
2176 fprintf(trace
->output
, "Problems initializing symbol libraries!\n");
2177 goto out_delete_evlist
;
2180 perf_evlist__config(evlist
, &trace
->opts
, NULL
);
2182 if (callchain_param
.enabled
) {
2183 bool use_identifier
= false;
2185 if (trace
->syscalls
.events
.sys_exit
) {
2186 perf_evsel__config_callchain(trace
->syscalls
.events
.sys_exit
,
2187 &trace
->opts
, &callchain_param
);
2188 use_identifier
= true;
2192 perf_evsel__config_callchain(pgfault_maj
, &trace
->opts
, &callchain_param
);
2193 use_identifier
= true;
2197 perf_evsel__config_callchain(pgfault_min
, &trace
->opts
, &callchain_param
);
2198 use_identifier
= true;
2201 if (use_identifier
) {
2203 * Now we have evsels with different sample_ids, use
2204 * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
2205 * from a fixed position in each ring buffer record.
2207 * As of this the changeset introducing this comment, this
2208 * isn't strictly needed, as the fields that can come before
2209 * PERF_SAMPLE_ID are all used, but we'll probably disable
2210 * some of those for things like copying the payload of
2211 * pointer syscall arguments, and for vfs_getname we don't
2212 * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
2213 * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
2215 perf_evlist__set_sample_bit(evlist
, IDENTIFIER
);
2216 perf_evlist__reset_sample_bit(evlist
, ID
);
2220 signal(SIGCHLD
, sig_handler
);
2221 signal(SIGINT
, sig_handler
);
2224 err
= perf_evlist__prepare_workload(evlist
, &trace
->opts
.target
,
2227 fprintf(trace
->output
, "Couldn't run the workload!\n");
2228 goto out_delete_evlist
;
2232 err
= perf_evlist__open(evlist
);
2234 goto out_error_open
;
2236 err
= bpf__apply_obj_config();
2238 char errbuf
[BUFSIZ
];
2240 bpf__strerror_apply_obj_config(err
, errbuf
, sizeof(errbuf
));
2241 pr_err("ERROR: Apply config to BPF failed: %s\n",
2243 goto out_error_open
;
2247 * Better not use !target__has_task() here because we need to cover the
2248 * case where no threads were specified in the command line, but a
2249 * workload was, and in that case we will fill in the thread_map when
2250 * we fork the workload in perf_evlist__prepare_workload.
2252 if (trace
->filter_pids
.nr
> 0)
2253 err
= perf_evlist__set_filter_pids(evlist
, trace
->filter_pids
.nr
, trace
->filter_pids
.entries
);
2254 else if (thread_map__pid(evlist
->threads
, 0) == -1)
2255 err
= perf_evlist__set_filter_pid(evlist
, getpid());
2260 if (trace
->ev_qualifier_ids
.nr
> 0) {
2261 err
= trace__set_ev_qualifier_filter(trace
);
2265 pr_debug("event qualifier tracepoint filter: %s\n",
2266 trace
->syscalls
.events
.sys_exit
->filter
);
2269 err
= perf_evlist__apply_filters(evlist
, &evsel
);
2271 goto out_error_apply_filters
;
2273 err
= perf_evlist__mmap(evlist
, trace
->opts
.mmap_pages
, false);
2275 goto out_error_mmap
;
2277 if (!target__none(&trace
->opts
.target
) && !trace
->opts
.initial_delay
)
2278 perf_evlist__enable(evlist
);
2281 perf_evlist__start_workload(evlist
);
2283 if (trace
->opts
.initial_delay
) {
2284 usleep(trace
->opts
.initial_delay
* 1000);
2285 perf_evlist__enable(evlist
);
2288 trace
->multiple_threads
= thread_map__pid(evlist
->threads
, 0) == -1 ||
2289 evlist
->threads
->nr
> 1 ||
2290 perf_evlist__first(evlist
)->attr
.inherit
;
2292 before
= trace
->nr_events
;
2294 for (i
= 0; i
< evlist
->nr_mmaps
; i
++) {
2295 union perf_event
*event
;
2297 while ((event
= perf_evlist__mmap_read(evlist
, i
)) != NULL
) {
2298 struct perf_sample sample
;
2302 err
= perf_evlist__parse_sample(evlist
, event
, &sample
);
2304 fprintf(trace
->output
, "Can't parse sample, err = %d, skipping...\n", err
);
2308 trace__handle_event(trace
, event
, &sample
);
2310 perf_evlist__mmap_consume(evlist
, i
);
2315 if (done
&& !draining
) {
2316 perf_evlist__disable(evlist
);
2322 if (trace
->nr_events
== before
) {
2323 int timeout
= done
? 100 : -1;
2325 if (!draining
&& perf_evlist__poll(evlist
, timeout
) > 0) {
2326 if (perf_evlist__filter_pollfd(evlist
, POLLERR
| POLLHUP
) == 0)
2336 thread__zput(trace
->current
);
2338 perf_evlist__disable(evlist
);
2342 trace__fprintf_thread_summary(trace
, trace
->output
);
2344 if (trace
->show_tool_stats
) {
2345 fprintf(trace
->output
, "Stats:\n "
2346 " vfs_getname : %" PRIu64
"\n"
2347 " proc_getname: %" PRIu64
"\n",
2348 trace
->stats
.vfs_getname
,
2349 trace
->stats
.proc_getname
);
2354 perf_evlist__delete(evlist
);
2355 trace
->evlist
= NULL
;
2356 trace
->live
= false;
2359 char errbuf
[BUFSIZ
];
2361 out_error_sched_stat_runtime
:
2362 tracing_path__strerror_open_tp(errno
, errbuf
, sizeof(errbuf
), "sched", "sched_stat_runtime");
2365 out_error_raw_syscalls
:
2366 tracing_path__strerror_open_tp(errno
, errbuf
, sizeof(errbuf
), "raw_syscalls", "sys_(enter|exit)");
2370 perf_evlist__strerror_mmap(evlist
, errno
, errbuf
, sizeof(errbuf
));
2374 perf_evlist__strerror_open(evlist
, errno
, errbuf
, sizeof(errbuf
));
2377 fprintf(trace
->output
, "%s\n", errbuf
);
2378 goto out_delete_evlist
;
2380 out_error_apply_filters
:
2381 fprintf(trace
->output
,
2382 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
2383 evsel
->filter
, perf_evsel__name(evsel
), errno
,
2384 str_error_r(errno
, errbuf
, sizeof(errbuf
)));
2385 goto out_delete_evlist
;
2388 fprintf(trace
->output
, "Not enough memory to run!\n");
2389 goto out_delete_evlist
;
2392 fprintf(trace
->output
, "errno=%d,%s\n", errno
, strerror(errno
));
2393 goto out_delete_evlist
;
2396 static int trace__replay(struct trace
*trace
)
2398 const struct perf_evsel_str_handler handlers
[] = {
2399 { "probe:vfs_getname", trace__vfs_getname
, },
2401 struct perf_data_file file
= {
2403 .mode
= PERF_DATA_MODE_READ
,
2404 .force
= trace
->force
,
2406 struct perf_session
*session
;
2407 struct perf_evsel
*evsel
;
2410 trace
->tool
.sample
= trace__process_sample
;
2411 trace
->tool
.mmap
= perf_event__process_mmap
;
2412 trace
->tool
.mmap2
= perf_event__process_mmap2
;
2413 trace
->tool
.comm
= perf_event__process_comm
;
2414 trace
->tool
.exit
= perf_event__process_exit
;
2415 trace
->tool
.fork
= perf_event__process_fork
;
2416 trace
->tool
.attr
= perf_event__process_attr
;
2417 trace
->tool
.tracing_data
= perf_event__process_tracing_data
;
2418 trace
->tool
.build_id
= perf_event__process_build_id
;
2420 trace
->tool
.ordered_events
= true;
2421 trace
->tool
.ordering_requires_timestamps
= true;
2423 /* add tid to output */
2424 trace
->multiple_threads
= true;
2426 session
= perf_session__new(&file
, false, &trace
->tool
);
2427 if (session
== NULL
)
2430 if (trace
->opts
.target
.pid
)
2431 symbol_conf
.pid_list_str
= strdup(trace
->opts
.target
.pid
);
2433 if (trace
->opts
.target
.tid
)
2434 symbol_conf
.tid_list_str
= strdup(trace
->opts
.target
.tid
);
2436 if (symbol__init(&session
->header
.env
) < 0)
2439 trace
->host
= &session
->machines
.host
;
2441 err
= perf_session__set_tracepoints_handlers(session
, handlers
);
2445 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
2446 "raw_syscalls:sys_enter");
2447 /* older kernels have syscalls tp versus raw_syscalls */
2449 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
2450 "syscalls:sys_enter");
2453 (perf_evsel__init_syscall_tp(evsel
, trace__sys_enter
) < 0 ||
2454 perf_evsel__init_sc_tp_ptr_field(evsel
, args
))) {
2455 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
2459 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
2460 "raw_syscalls:sys_exit");
2462 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
2463 "syscalls:sys_exit");
2465 (perf_evsel__init_syscall_tp(evsel
, trace__sys_exit
) < 0 ||
2466 perf_evsel__init_sc_tp_uint_field(evsel
, ret
))) {
2467 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
2471 evlist__for_each_entry(session
->evlist
, evsel
) {
2472 if (evsel
->attr
.type
== PERF_TYPE_SOFTWARE
&&
2473 (evsel
->attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
||
2474 evsel
->attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MIN
||
2475 evsel
->attr
.config
== PERF_COUNT_SW_PAGE_FAULTS
))
2476 evsel
->handler
= trace__pgfault
;
2481 err
= perf_session__process_events(session
);
2483 pr_err("Failed to process events, error %d", err
);
2485 else if (trace
->summary
)
2486 trace__fprintf_thread_summary(trace
, trace
->output
);
2489 perf_session__delete(session
);
/* Print the banner that precedes the per-thread summary table. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
2503 DEFINE_RESORT_RB(syscall_stats
, a
->msecs
> b
->msecs
,
2504 struct stats
*stats
;
2509 struct int_node
*source
= rb_entry(nd
, struct int_node
, rb_node
);
2510 struct stats
*stats
= source
->priv
;
2512 entry
->syscall
= source
->i
;
2513 entry
->stats
= stats
;
2514 entry
->msecs
= stats
? (u64
)stats
->n
* (avg_stats(stats
) / NSEC_PER_MSEC
) : 0;
2517 static size_t thread__dump_stats(struct thread_trace
*ttrace
,
2518 struct trace
*trace
, FILE *fp
)
2523 DECLARE_RESORT_RB_INTLIST(syscall_stats
, ttrace
->syscall_stats
);
2525 if (syscall_stats
== NULL
)
2528 printed
+= fprintf(fp
, "\n");
2530 printed
+= fprintf(fp
, " syscall calls total min avg max stddev\n");
2531 printed
+= fprintf(fp
, " (msec) (msec) (msec) (msec) (%%)\n");
2532 printed
+= fprintf(fp
, " --------------- -------- --------- --------- --------- --------- ------\n");
2534 resort_rb__for_each_entry(nd
, syscall_stats
) {
2535 struct stats
*stats
= syscall_stats_entry
->stats
;
2537 double min
= (double)(stats
->min
) / NSEC_PER_MSEC
;
2538 double max
= (double)(stats
->max
) / NSEC_PER_MSEC
;
2539 double avg
= avg_stats(stats
);
2541 u64 n
= (u64
) stats
->n
;
2543 pct
= avg
? 100.0 * stddev_stats(stats
)/avg
: 0.0;
2544 avg
/= NSEC_PER_MSEC
;
2546 sc
= &trace
->syscalls
.table
[syscall_stats_entry
->syscall
];
2547 printed
+= fprintf(fp
, " %-15s", sc
->name
);
2548 printed
+= fprintf(fp
, " %8" PRIu64
" %9.3f %9.3f %9.3f",
2549 n
, syscall_stats_entry
->msecs
, min
, avg
);
2550 printed
+= fprintf(fp
, " %9.3f %9.2f%%\n", max
, pct
);
2554 resort_rb__delete(syscall_stats
);
2555 printed
+= fprintf(fp
, "\n\n");
2560 static size_t trace__fprintf_thread(FILE *fp
, struct thread
*thread
, struct trace
*trace
)
2563 struct thread_trace
*ttrace
= thread__priv(thread
);
2569 ratio
= (double)ttrace
->nr_events
/ trace
->nr_events
* 100.0;
2571 printed
+= fprintf(fp
, " %s (%d), ", thread__comm_str(thread
), thread
->tid
);
2572 printed
+= fprintf(fp
, "%lu events, ", ttrace
->nr_events
);
2573 printed
+= fprintf(fp
, "%.1f%%", ratio
);
2575 printed
+= fprintf(fp
, ", %lu majfaults", ttrace
->pfmaj
);
2577 printed
+= fprintf(fp
, ", %lu minfaults", ttrace
->pfmin
);
2579 printed
+= fprintf(fp
, ", %.3f msec\n", ttrace
->runtime_ms
);
2580 else if (fputc('\n', fp
) != EOF
)
2583 printed
+= thread__dump_stats(ttrace
, trace
, fp
);
2588 static unsigned long thread__nr_events(struct thread_trace
*ttrace
)
2590 return ttrace
? ttrace
->nr_events
: 0;
2593 DEFINE_RESORT_RB(threads
, (thread__nr_events(a
->thread
->priv
) < thread__nr_events(b
->thread
->priv
)),
2594 struct thread
*thread
;
2597 entry
->thread
= rb_entry(nd
, struct thread
, rb_node
);
2600 static size_t trace__fprintf_thread_summary(struct trace
*trace
, FILE *fp
)
2602 DECLARE_RESORT_RB_MACHINE_THREADS(threads
, trace
->host
);
2603 size_t printed
= trace__fprintf_threads_header(fp
);
2606 if (threads
== NULL
) {
2607 fprintf(fp
, "%s", "Error sorting output by nr_events!\n");
2611 resort_rb__for_each_entry(nd
, threads
)
2612 printed
+= trace__fprintf_thread(fp
, threads_entry
->thread
, trace
);
2614 resort_rb__delete(threads
);
2619 static int trace__set_duration(const struct option
*opt
, const char *str
,
2620 int unset __maybe_unused
)
2622 struct trace
*trace
= opt
->value
;
2624 trace
->duration_filter
= atof(str
);
2628 static int trace__set_filter_pids(const struct option
*opt
, const char *str
,
2629 int unset __maybe_unused
)
2633 struct trace
*trace
= opt
->value
;
2635 * FIXME: introduce a intarray class, plain parse csv and create a
2636 * { int nr, int entries[] } struct...
2638 struct intlist
*list
= intlist__new(str
);
2643 i
= trace
->filter_pids
.nr
= intlist__nr_entries(list
) + 1;
2644 trace
->filter_pids
.entries
= calloc(i
, sizeof(pid_t
));
2646 if (trace
->filter_pids
.entries
== NULL
)
2649 trace
->filter_pids
.entries
[0] = getpid();
2651 for (i
= 1; i
< trace
->filter_pids
.nr
; ++i
)
2652 trace
->filter_pids
.entries
[i
] = intlist__entry(list
, i
- 1)->i
;
2654 intlist__delete(list
);
2660 static int trace__open_output(struct trace
*trace
, const char *filename
)
2664 if (!stat(filename
, &st
) && st
.st_size
) {
2665 char oldname
[PATH_MAX
];
2667 scnprintf(oldname
, sizeof(oldname
), "%s.old", filename
);
2669 rename(filename
, oldname
);
2672 trace
->output
= fopen(filename
, "w");
2674 return trace
->output
== NULL
? -errno
: 0;
2677 static int parse_pagefaults(const struct option
*opt
, const char *str
,
2678 int unset __maybe_unused
)
2680 int *trace_pgfaults
= opt
->value
;
2682 if (strcmp(str
, "all") == 0)
2683 *trace_pgfaults
|= TRACE_PFMAJ
| TRACE_PFMIN
;
2684 else if (strcmp(str
, "maj") == 0)
2685 *trace_pgfaults
|= TRACE_PFMAJ
;
2686 else if (strcmp(str
, "min") == 0)
2687 *trace_pgfaults
|= TRACE_PFMIN
;
2694 static void evlist__set_evsel_handler(struct perf_evlist
*evlist
, void *handler
)
2696 struct perf_evsel
*evsel
;
2698 evlist__for_each_entry(evlist
, evsel
)
2699 evsel
->handler
= handler
;
2702 int cmd_trace(int argc
, const char **argv
, const char *prefix __maybe_unused
)
2704 const char *trace_usage
[] = {
2705 "perf trace [<options>] [<command>]",
2706 "perf trace [<options>] -- <command> [<options>]",
2707 "perf trace record [<options>] [<command>]",
2708 "perf trace record [<options>] -- <command> [<options>]",
2711 struct trace trace
= {
2720 .user_freq
= UINT_MAX
,
2721 .user_interval
= ULLONG_MAX
,
2722 .no_buffering
= true,
2723 .mmap_pages
= UINT_MAX
,
2724 .proc_map_timeout
= 500,
2728 .trace_syscalls
= true,
2729 .kernel_syscallchains
= false,
2730 .max_stack
= UINT_MAX
,
2732 const char *output_name
= NULL
;
2733 const char *ev_qualifier_str
= NULL
;
2734 const struct option trace_options
[] = {
2735 OPT_CALLBACK(0, "event", &trace
.evlist
, "event",
2736 "event selector. use 'perf list' to list available events",
2737 parse_events_option
),
2738 OPT_BOOLEAN(0, "comm", &trace
.show_comm
,
2739 "show the thread COMM next to its id"),
2740 OPT_BOOLEAN(0, "tool_stats", &trace
.show_tool_stats
, "show tool stats"),
2741 OPT_STRING('e', "expr", &ev_qualifier_str
, "expr", "list of syscalls to trace"),
2742 OPT_STRING('o', "output", &output_name
, "file", "output file name"),
2743 OPT_STRING('i', "input", &input_name
, "file", "Analyze events in file"),
2744 OPT_STRING('p', "pid", &trace
.opts
.target
.pid
, "pid",
2745 "trace events on existing process id"),
2746 OPT_STRING('t', "tid", &trace
.opts
.target
.tid
, "tid",
2747 "trace events on existing thread id"),
2748 OPT_CALLBACK(0, "filter-pids", &trace
, "CSV list of pids",
2749 "pids to filter (by the kernel)", trace__set_filter_pids
),
2750 OPT_BOOLEAN('a', "all-cpus", &trace
.opts
.target
.system_wide
,
2751 "system-wide collection from all CPUs"),
2752 OPT_STRING('C', "cpu", &trace
.opts
.target
.cpu_list
, "cpu",
2753 "list of cpus to monitor"),
2754 OPT_BOOLEAN(0, "no-inherit", &trace
.opts
.no_inherit
,
2755 "child tasks do not inherit counters"),
2756 OPT_CALLBACK('m', "mmap-pages", &trace
.opts
.mmap_pages
, "pages",
2757 "number of mmap data pages",
2758 perf_evlist__parse_mmap_pages
),
2759 OPT_STRING('u', "uid", &trace
.opts
.target
.uid_str
, "user",
2761 OPT_CALLBACK(0, "duration", &trace
, "float",
2762 "show only events with duration > N.M ms",
2763 trace__set_duration
),
2764 OPT_BOOLEAN(0, "sched", &trace
.sched
, "show blocking scheduler events"),
2765 OPT_INCR('v', "verbose", &verbose
, "be more verbose"),
2766 OPT_BOOLEAN('T', "time", &trace
.full_time
,
2767 "Show full timestamp, not time relative to first start"),
2768 OPT_BOOLEAN('s', "summary", &trace
.summary_only
,
2769 "Show only syscall summary with statistics"),
2770 OPT_BOOLEAN('S', "with-summary", &trace
.summary
,
2771 "Show all syscalls and summary with statistics"),
2772 OPT_CALLBACK_DEFAULT('F', "pf", &trace
.trace_pgfaults
, "all|maj|min",
2773 "Trace pagefaults", parse_pagefaults
, "maj"),
2774 OPT_BOOLEAN(0, "syscalls", &trace
.trace_syscalls
, "Trace syscalls"),
2775 OPT_BOOLEAN('f', "force", &trace
.force
, "don't complain, do it"),
2776 OPT_CALLBACK(0, "call-graph", &trace
.opts
,
2777 "record_mode[,record_size]", record_callchain_help
,
2778 &record_parse_callchain_opt
),
2779 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace
.kernel_syscallchains
,
2780 "Show the kernel callchains on the syscall exit path"),
2781 OPT_UINTEGER(0, "min-stack", &trace
.min_stack
,
2782 "Set the minimum stack depth when parsing the callchain, "
2783 "anything below the specified depth will be ignored."),
2784 OPT_UINTEGER(0, "max-stack", &trace
.max_stack
,
2785 "Set the maximum stack depth when parsing the callchain, "
2786 "anything beyond the specified depth will be ignored. "
2787 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH
)),
2788 OPT_UINTEGER(0, "proc-map-timeout", &trace
.opts
.proc_map_timeout
,
2789 "per thread proc mmap processing timeout in ms"),
2790 OPT_UINTEGER('D', "delay", &trace
.opts
.initial_delay
,
2791 "ms to wait before starting measurement after program "
2795 bool __maybe_unused max_stack_user_set
= true;
2796 bool mmap_pages_user_set
= true;
2797 const char * const trace_subcommands
[] = { "record", NULL
};
2801 signal(SIGSEGV
, sighandler_dump_stack
);
2802 signal(SIGFPE
, sighandler_dump_stack
);
2804 trace
.evlist
= perf_evlist__new();
2805 trace
.sctbl
= syscalltbl__new();
2807 if (trace
.evlist
== NULL
|| trace
.sctbl
== NULL
) {
2808 pr_err("Not enough memory to run!\n");
2813 argc
= parse_options_subcommand(argc
, argv
, trace_options
, trace_subcommands
,
2814 trace_usage
, PARSE_OPT_STOP_AT_NON_OPTION
);
2816 err
= bpf__setup_stdout(trace
.evlist
);
2818 bpf__strerror_setup_stdout(trace
.evlist
, err
, bf
, sizeof(bf
));
2819 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf
);
2825 if (trace
.trace_pgfaults
) {
2826 trace
.opts
.sample_address
= true;
2827 trace
.opts
.sample_time
= true;
2830 if (trace
.opts
.mmap_pages
== UINT_MAX
)
2831 mmap_pages_user_set
= false;
2833 if (trace
.max_stack
== UINT_MAX
) {
2834 trace
.max_stack
= input_name
? PERF_MAX_STACK_DEPTH
: sysctl_perf_event_max_stack
;
2835 max_stack_user_set
= false;
2838 #ifdef HAVE_DWARF_UNWIND_SUPPORT
2839 if ((trace
.min_stack
|| max_stack_user_set
) && !callchain_param
.enabled
&& trace
.trace_syscalls
)
2840 record_opts__parse_callchain(&trace
.opts
, &callchain_param
, "dwarf", false);
2843 if (callchain_param
.enabled
) {
2844 if (!mmap_pages_user_set
&& geteuid() == 0)
2845 trace
.opts
.mmap_pages
= perf_event_mlock_kb_in_pages() * 4;
2847 symbol_conf
.use_callchain
= true;
2850 if (trace
.evlist
->nr_entries
> 0)
2851 evlist__set_evsel_handler(trace
.evlist
, trace__event_handler
);
2853 if ((argc
>= 1) && (strcmp(argv
[0], "record") == 0))
2854 return trace__record(&trace
, argc
-1, &argv
[1]);
2856 /* summary_only implies summary option, but don't overwrite summary if set */
2857 if (trace
.summary_only
)
2858 trace
.summary
= trace
.summary_only
;
2860 if (!trace
.trace_syscalls
&& !trace
.trace_pgfaults
&&
2861 trace
.evlist
->nr_entries
== 0 /* Was --events used? */) {
2862 pr_err("Please specify something to trace.\n");
2866 if (!trace
.trace_syscalls
&& ev_qualifier_str
) {
2867 pr_err("The -e option can't be used with --no-syscalls.\n");
2871 if (output_name
!= NULL
) {
2872 err
= trace__open_output(&trace
, output_name
);
2874 perror("failed to create output file");
2879 trace
.open_id
= syscalltbl__id(trace
.sctbl
, "open");
2881 if (ev_qualifier_str
!= NULL
) {
2882 const char *s
= ev_qualifier_str
;
2883 struct strlist_config slist_config
= {
2884 .dirname
= system_path(STRACE_GROUPS_DIR
),
2887 trace
.not_ev_qualifier
= *s
== '!';
2888 if (trace
.not_ev_qualifier
)
2890 trace
.ev_qualifier
= strlist__new(s
, &slist_config
);
2891 if (trace
.ev_qualifier
== NULL
) {
2892 fputs("Not enough memory to parse event qualifier",
2898 err
= trace__validate_ev_qualifier(&trace
);
2903 err
= target__validate(&trace
.opts
.target
);
2905 target__strerror(&trace
.opts
.target
, err
, bf
, sizeof(bf
));
2906 fprintf(trace
.output
, "%s", bf
);
2910 err
= target__parse_uid(&trace
.opts
.target
);
2912 target__strerror(&trace
.opts
.target
, err
, bf
, sizeof(bf
));
2913 fprintf(trace
.output
, "%s", bf
);
2917 if (!argc
&& target__none(&trace
.opts
.target
))
2918 trace
.opts
.target
.system_wide
= true;
2921 err
= trace__replay(&trace
);
2923 err
= trace__run(&trace
, argc
, argv
);
2926 if (output_name
!= NULL
)
2927 fclose(trace
.output
);