/*
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/string.h"

#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/trace-event.h"

#include <unistd.h>
#include <sched.h>
#define ALIGN(x, a)             __ALIGN_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_MASK(x, mask)   (((x)+(mask))&~(mask))
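/*
 * ALIGN() rounds x up to the next multiple of a, which must be a power
 * of two, e.g. ALIGN(13, 8) == 16 and ALIGN(16, 8) == 16.  It is used
 * below to pad event record sizes to u64 boundaries.
 */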
static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static long default_interval = 100000;

static int nr_cpus = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static int freq = 0;
static int output;
static const char *output_name = "perf.data";
static int group = 0;
static unsigned int realtime_prio = 0;
static int raw_samples = 0;
static int system_wide = 0;
static int profile_cpu = -1;
static pid_t target_pid = -1;
static int inherit = 1;
static int force = 0;
static int append_file = 0;
static int call_graph = 0;
static int inherit_stat = 0;
static int no_samples = 0;
static int sample_address = 0;
static long samples;
static struct timeval last_read;
static struct timeval this_read;

static u64 bytes_written;

static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];

static int nr_poll;
static int nr_cpu;
static int group_fd = -1;

static int file_new = 1;

struct perf_header *header;

struct mmap_data {
        int counter;
        void *base;
        unsigned int mask;
        unsigned int prev;
};

static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
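/*
 * Each mmap'ed counter exposes a struct perf_counter_mmap_page in its
 * first page: the kernel advances data_head as it writes samples into
 * the data area, and user space advances data_tail once it has consumed
 * them.  The two helpers below wrap those fields.
 */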
static unsigned long mmap_read_head(struct mmap_data *md)
{
        struct perf_counter_mmap_page *pc = md->base;
        long head;

        head = pc->data_head;
        rmb();

        return head;
}

static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
        struct perf_counter_mmap_page *pc = md->base;

        /*
         * ensure all reads are done before we write the tail out.
         */
        pc->data_tail = tail;
}
static void write_output(void *buf, size_t size)
{
        while (size) {
                int ret = write(output, buf, size);

                if (ret < 0)
                        die("failed to write");

                size -= ret;
                buf += ret;

                bytes_written += ret;
        }
}
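/*
 * mmap_read() drains one counter's ring buffer into the output file.
 * Because the data area is a circular buffer, a single read may have to
 * be split into two write_output() calls: one for the bytes up to the
 * end of the buffer and one for the wrapped remainder at its start.
 */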
static void mmap_read(struct mmap_data *md)
{
        unsigned int head = mmap_read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
        int diff;

        gettimeofday(&this_read, NULL);

        /*
         * If we're further behind than half the buffer, there's a chance
         * the writer will bite our tail and mess up the samples under us.
         *
         * If we somehow ended up ahead of the head, we got messed up.
         *
         * In either case, truncate and restart at head.
         */
        diff = head - old;
        if (diff > md->mask / 2 || diff < 0) {
                struct timeval iv;
                unsigned long msecs;

                timersub(&this_read, &last_read, &iv);
                msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

                fprintf(stderr, "WARNING: failed to keep up with mmap data."
                                " Last read %lu msecs ago.\n", msecs);

                /*
                 * head points to a known good entry, start there.
                 */
                old = head;
        }

        last_read = this_read;

        if (old != head)
                samples++;

        size = head - old;

        if ((old & md->mask) + size != (head & md->mask)) {
                buf = &data[old & md->mask];
                size = md->mask + 1 - (old & md->mask);
                old += size;

                write_output(buf, size);
        }

        buf = &data[old & md->mask];
        size = head - old;
        old += size;

        write_output(buf, size);

        md->prev = old;
        mmap_write_tail(md, old);
}
static volatile int done = 0;
static volatile int signr = -1;

static void sig_handler(int sig)
{
        done = 1;
        signr = sig;
}
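/*
 * At exit, restore the default handler and re-raise the signal so the
 * process reports the expected termination status once the atexit()
 * handlers (such as the header rewrite) have run.
 */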
static void sig_atexit(void)
{
        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}
static pid_t pid_synthesize_comm_event(pid_t pid, int full)
{
        struct comm_event comm_ev;
        char filename[PATH_MAX];
        char bf[BUFSIZ];
        FILE *fp;
        size_t size = 0;
        DIR *tasks;
        struct dirent dirent, *next;
        pid_t tgid = 0;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                if (verbose)
                        fprintf(stderr, "couldn't open %s\n", filename);
                return 0;
        }

        memset(&comm_ev, 0, sizeof(comm_ev));
        while (!comm_ev.comm[0] || !comm_ev.pid) {
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        goto out_failure;

                if (memcmp(bf, "Name:", 5) == 0) {
                        char *name = bf + 5;
                        while (*name && isspace(*name))
                                ++name;
                        size = strlen(name) - 1;
                        memcpy(comm_ev.comm, name, size++);
                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        char *tgids = bf + 5;
                        while (*tgids && isspace(*tgids))
                                ++tgids;
                        tgid = comm_ev.pid = atoi(tgids);
                }
        }

        comm_ev.header.type = PERF_EVENT_COMM;
        size = ALIGN(size, sizeof(u64));
        comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);

        if (!full) {
                comm_ev.tid = pid;

                write_output(&comm_ev, comm_ev.header.size);
                goto out_fclose;
        }

        snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

        tasks = opendir(filename);
        while (!readdir_r(tasks, &dirent, &next) && next) {
                char *end;

                pid = strtol(dirent.d_name, &end, 10);
                if (*end)
                        continue;

                comm_ev.tid = pid;

                write_output(&comm_ev, comm_ev.header.size);
        }
        closedir(tasks);

out_fclose:
        fclose(fp);
        return tgid;

out_failure:
        fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n",
                filename);
        exit(EXIT_FAILURE);
}
static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid)
{
        char filename[PATH_MAX];
        FILE *fp;

        snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                if (verbose)
                        fprintf(stderr, "couldn't open %s\n", filename);
                return;
        }

        while (1) {
                char bf[BUFSIZ], *pbf = bf;
                struct mmap_event mmap_ev = {
                        .header = { .type = PERF_EVENT_MMAP },
                };
                int n;
                size_t size;

                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = hex2u64(pbf, &mmap_ev.start);
                if (n < 0)
                        continue;
                pbf += n + 1;
                n = hex2u64(pbf, &mmap_ev.len);
                if (n < 0)
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
                        char *execname = strchr(bf, '/');

                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");

                        if (execname == NULL)
                                continue;

                        size = strlen(execname);
                        execname[size - 1] = '\0'; /* Remove \n */
                        memcpy(mmap_ev.filename, execname, size);
                        size = ALIGN(size, sizeof(u64));
                        mmap_ev.len -= mmap_ev.start;
                        mmap_ev.header.size = (sizeof(mmap_ev) -
                                               (sizeof(mmap_ev.filename) - size));
                        mmap_ev.pid = tgid;
                        mmap_ev.tid = pid;

                        write_output(&mmap_ev, mmap_ev.header.size);
                }
        }

        fclose(fp);
}
static void synthesize_all(void)
{
        DIR *proc;
        struct dirent dirent, *next;

        proc = opendir("/proc");

        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
                pid_t pid, tgid;

                pid = strtol(dirent.d_name, &end, 10);
                if (*end) /* only interested in proper numerical dirents */
                        continue;

                tgid = pid_synthesize_comm_event(pid, 1);
                pid_synthesize_mmap_samples(pid, tgid);
        }

        closedir(proc);
}
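/*
 * When appending to an existing perf.data, the header already carries
 * attribute entries; reuse them here so create_counter() below can
 * verify that the newly opened counters match what was recorded before.
 */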
static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
{
        struct perf_header_attr *h_attr;

        if (nr < header->attrs) {
                h_attr = header->attr[nr];
        } else {
                h_attr = perf_header_attr__new(a);
                perf_header__add_attr(header, h_attr);
        }

        return h_attr;
}
static void create_counter(int counter, int cpu, pid_t pid)
{
        struct perf_counter_attr *attr = attrs + counter;
        struct perf_header_attr *h_attr;
        int track = !counter; /* only the first counter needs these */
        struct {
                u64 count;
                u64 time_enabled;
                u64 time_running;
                u64 id;
        } read_data;

        attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                            PERF_FORMAT_TOTAL_TIME_RUNNING |
                            PERF_FORMAT_ID;

        attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

        if (freq) {
                attr->sample_type |= PERF_SAMPLE_PERIOD;
                attr->freq = 1;
                attr->sample_freq = freq;
        }

        if (no_samples)
                attr->sample_freq = 0;

        if (inherit_stat)
                attr->inherit_stat = 1;

        if (sample_address)
                attr->sample_type |= PERF_SAMPLE_ADDR;

        if (call_graph)
                attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

        if (raw_samples) {
                attr->sample_type |= PERF_SAMPLE_TIME;
                attr->sample_type |= PERF_SAMPLE_RAW;
                attr->sample_type |= PERF_SAMPLE_CPU;
        }

        attr->mmap = track;
        attr->comm = track;
        attr->inherit = (cpu < 0) && inherit;
        attr->disabled = 1;
try_again:
        fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);

        if (fd[nr_cpu][counter] < 0) {
                int err = errno;

                if (err == EPERM)
                        die("Permission error - are you root?\n");
                else if (err == ENODEV && profile_cpu != -1)
                        die("No such device - did you specify an out-of-range profile CPU?\n");

                /*
                 * If it's cycles then fall back to hrtimer
                 * based cpu-clock-tick sw counter, which
                 * is always available even if no PMU support:
                 */
                if (attr->type == PERF_TYPE_HARDWARE
                        && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

                        if (verbose)
                                warning(" ... trying to fall back to cpu-clock-ticks\n");
                        attr->type = PERF_TYPE_SOFTWARE;
                        attr->config = PERF_COUNT_SW_CPU_CLOCK;
                        goto try_again;
                }

                error("perfcounter syscall returned with %d (%s)\n",
                        fd[nr_cpu][counter], strerror(err));
                die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
        }
        h_attr = get_header_attr(attr, counter);

        if (!file_new) {
                if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
                        fprintf(stderr, "incompatible append\n");
                        exit(-1);
                }
        }

        if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
                perror("Unable to read perf file descriptor\n");
                exit(-1);
        }

        perf_header_attr__add_id(h_attr, read_data.id);

        assert(fd[nr_cpu][counter] >= 0);
        fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
        /*
         * First counter acts as the group leader:
         */
        if (group && group_fd == -1)
                group_fd = fd[nr_cpu][counter];

        event_array[nr_poll].fd = fd[nr_cpu][counter];
        event_array[nr_poll].events = POLLIN;
        nr_poll++;
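        /*
         * Map mmap_pages + 1 pages: the first page holds the
         * perf_counter_mmap_page control structure, and the remaining
         * mmap_pages pages (expected to be a power of two) form the
         * sample data area, hence the mask of mmap_pages*page_size - 1.
         */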
        mmap_array[nr_cpu][counter].counter = counter;
        mmap_array[nr_cpu][counter].prev = 0;
        mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
        mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
                        PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
        if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
                error("failed to mmap with %d (%s)\n", errno, strerror(errno));
                exit(-1);
        }

        ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE);
}
static void open_counters(int cpu, pid_t pid)
{
        int counter;

        group_fd = -1;
        for (counter = 0; counter < nr_counters; counter++)
                create_counter(counter, cpu, pid);

        nr_cpu++;
}
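/*
 * The on-disk header is rewritten at exit because the final data_size
 * is only known once recording has stopped and all mmap buffers have
 * been flushed to the output file.
 */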
static void atexit_header(void)
{
        header->data_size += bytes_written;

        perf_header__write(header, output);
}
static int __cmd_record(int argc, const char **argv)
{
        int i, counter;
        struct stat st;
        pid_t pid = 0;
        int flags;
        int ret;

        page_size = sysconf(_SC_PAGE_SIZE);
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        assert(nr_cpus <= MAX_NR_CPUS);
        assert(nr_cpus >= 0);

        atexit(sig_atexit);
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);

        if (!stat(output_name, &st) && st.st_size) {
                if (!force && !append_file) {
                        fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n",
                                output_name);
                        exit(-1);
                }
        } else {
                append_file = 0;
        }

        flags = O_CREAT|O_RDWR;
        if (append_file)
                file_new = 0;
        else
                flags |= O_TRUNC;

        output = open(output_name, flags, S_IRUSR|S_IWUSR);
        if (output < 0) {
                perror("failed to create output file");
                exit(-1);
        }

        if (!file_new)
                header = perf_header__read(output);
        else
                header = perf_header__new();
        if (raw_samples) {
                read_tracing_data(attrs, nr_counters);
        } else {
                for (i = 0; i < nr_counters; i++) {
                        if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
                                read_tracing_data(attrs, nr_counters);
                                break;
                        }
                }
        }

        atexit(atexit_header);

        if (!system_wide) {
                pid = target_pid;
                if (pid == -1)
                        pid = getpid();

                open_counters(profile_cpu, pid);
        } else {
                if (profile_cpu != -1) {
                        open_counters(profile_cpu, target_pid);
                } else {
                        for (i = 0; i < nr_cpus; i++)
                                open_counters(i, target_pid);
                }
        }

        if (file_new)
                perf_header__write(header, output);

        if (!system_wide) {
                pid_t tgid = pid_synthesize_comm_event(pid, 0);
                pid_synthesize_mmap_samples(pid, tgid);
        } else
                synthesize_all();
        if (target_pid == -1 && argc) {
                pid = fork();
                if (pid < 0)
                        perror("failed to fork");

                if (!pid) {
                        if (execvp(argv[0], (char **)argv)) {
                                perror(argv[0]);
                                exit(-1);
                        }
                }
        }

        if (realtime_prio) {
                struct sched_param param;

                param.sched_priority = realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        printf("Could not set realtime priority.\n");
                        exit(-1);
                }
        }

        for (;;) {
                int hits = samples;

                for (i = 0; i < nr_cpu; i++) {
                        for (counter = 0; counter < nr_counters; counter++)
                                mmap_read(&mmap_array[i][counter]);
                }

                if (hits == samples) {
                        if (done)
                                break;
                        ret = poll(event_array, nr_poll, 100);
                }
        }

        /*
         * Approximate RIP event size: 24 bytes.
         */
        fprintf(stderr,
                "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
                (double)bytes_written / 1024.0 / 1024.0,
                output_name,
                bytes_written / 24);

        return 0;
}
static const char * const record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
        NULL
};

static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_INTEGER('p', "pid", &target_pid,
                    "record events on existing pid"),
        OPT_INTEGER('r', "realtime", &realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_BOOLEAN('R', "raw-samples", &raw_samples,
                    "collect raw sample records from all opened counters"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('A', "append", &append_file,
                    "append to the output file to do incremental profiling"),
        OPT_INTEGER('C', "profile_cpu", &profile_cpu,
                    "CPU to profile on"),
        OPT_BOOLEAN('f', "force", &force,
                    "overwrite existing data file"),
        OPT_LONG('c', "count", &default_interval,
                 "event period to sample"),
        OPT_STRING('o', "output", &output_name, "file",
                   "output file name"),
        OPT_BOOLEAN('i', "inherit", &inherit,
                    "child tasks inherit counters"),
        OPT_INTEGER('F', "freq", &freq,
                    "profile at this frequency"),
        OPT_INTEGER('m', "mmap-pages", &mmap_pages,
                    "number of mmap data pages"),
        OPT_BOOLEAN('g', "call-graph", &call_graph,
                    "do call-graph (stack chain/backtrace) recording"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('s', "stat", &inherit_stat,
                    "per thread counts"),
        OPT_BOOLEAN('d', "data", &sample_address,
                    "Sample addresses"),
        OPT_BOOLEAN('n', "no-samples", &no_samples,
                    "don't sample"),
        OPT_END()
};
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
        int counter;

        argc = parse_options(argc, argv, options, record_usage,
                PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target_pid == -1 && !system_wide)
                usage_with_options(record_usage, options);

        if (!nr_counters) {
                nr_counters = 1;
                attrs[0].type = PERF_TYPE_HARDWARE;
                attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
        }

        for (counter = 0; counter < nr_counters; counter++) {
                if (attrs[counter].sample_period)
                        continue;

                attrs[counter].sample_period = default_interval;
        }

        return __cmd_record(argc, argv);
}