// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/types.h>
#include <perf/cpumap.h>
#include "map_symbol.h"
#include "perf_regs.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "ui/progress.h"
#include "arch/common.h"
#include <internal/lib.h>
#include <linux/err.h>
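/*
 * Each PERF_RECORD_COMPRESSED payload is decompressed into an anonymous
 * mmap'ed "struct decomp" buffer below; buffers are kept on a singly linked
 * list (session->decomp .. session->decomp_last), and any partially consumed
 * event left at the tail of the previous buffer is copied to the head of the
 * new one before decompression continues.
 */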
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	decomp_last_rem = decomp_last->size - decomp_last->head;
	decomp_len += decomp_last_rem;

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;

	pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);

#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,

static int perf_session__open(struct perf_session *session)
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");

	if (perf_data__is_pipe(data))

	if (perf_header__has_feat(&session->header, HEADER_STAT))

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");

void perf_session__set_id_hdr_size(struct perf_session *session)
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);

int perf_session__create_kernel_maps(struct perf_session *session)
	int ret = machine__create_kernel_maps(&session->machines.host);

	ret = machines__create_guest_kernel_maps(&session->machines);

static void perf_session__destroy_kernel_maps(struct perf_session *session)
	machines__destroy_kernel_maps(&session->machines);

static bool perf_session__has_comm_exec(struct perf_session *session)
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)

static void perf_session__set_comm_exec(struct perf_session *session)
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
	struct perf_session *session = container_of(oe, struct perf_session,

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
	struct perf_session *session = zalloc(sizeof(*session));

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);
	perf_env__init(&session->header.env);

	ret = perf_data__open(data);
	session->data = data;

	if (perf_data__is_read(data)) {
		ret = perf_session__open(session);

		/*
		 * set session attributes that are present in perf.data
		 * but not in pipe-mode.
		 */
		if (!data->is_pipe) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);

		perf_evlist__init_trace_event_sample_raw(session->evlist);

		/* Open the directory data. */
		ret = perf_data__open_dir(data);

		if (!symbol_conf.kallsyms_name &&
		    !symbol_conf.vmlinux_name)
			symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);

	session->machines.host.env = &perf_env;

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;

	perf_session__delete(session);
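/*
 * Typical lifecycle (sketch, not part of this file): a builtin command
 * creates the session, lets it drive event processing, then tears it down:
 *
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *	if (IS_ERR(session))
 *		... handle the error ...
 *	perf_session__process_events(session);	// declared in session.h
 *	perf_session__delete(session);
 *
 * perf_session__process_events() is assumed here from the header; builtins
 * such as perf report follow roughly this pattern.
 */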
static void perf_session__delete_threads(struct perf_session *session)
	machine__delete_threads(&session->machines.host);

static void perf_session__release_decomp_events(struct perf_session *session)
	struct decomp *next, *decomp;

	next = session->decomp;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);

void perf_session__delete(struct perf_session *session)
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	perf_data__close(session->data);
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 union perf_event *event
	dump_printf(": unhandled!\n");

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
	dump_printf(": unhandled!\n");

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
	dump_printf(": unhandled!\n");

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
	dump_printf(": unhandled!\n");

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
	dump_printf(": unhandled!\n");

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
	dump_printf(": unhandled!\n");

int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");

int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");

int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
							union perf_event *event __maybe_unused,
							u64 file_offset __maybe_unused)
	dump_printf(": unhandled!\n");
void perf_tool__fill_defaults(struct perf_tool *tool)
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
			tool->finished_round = process_finished_round_stub;
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
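/*
 * Usage sketch (not part of this file): a consumer only fills in the
 * callbacks it cares about and leaves the rest NULL; perf_tool__fill_defaults()
 * then wires the remaining members to the stubs above, e.g.:
 *
 *	struct perf_tool tool = {
 *		.sample		= my_process_sample,	// hypothetical callback
 *		.ordered_events	= true,
 *	};
 *	perf_tool__fill_defaults(&tool);
 */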
static void swap_sample_id_all(union perf_event *event, void *data)
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);

static void perf_event__mmap_swap(union perf_event *event,
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);

static void perf_event__mmap2_swap(union perf_event *event,
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

		swap_sample_id_all(event, &event->fork + 1);

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

		swap_sample_id_all(event, &event->read + 1);

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

		swap_sample_id_all(event, &event->aux + 1);

static void perf_event__itrace_start_swap(union perf_event *event,
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

		swap_sample_id_all(event, &event->itrace_start + 1);

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);

		swap_sample_id_all(event, &event->context_switch + 1);

static void perf_event__throttle_swap(union perf_event *event,
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

		swap_sample_id_all(event, &event->throttle + 1);

static void perf_event__namespaces_swap(union perf_event *event,
	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);

		swap_sample_id_all(event, &event->namespaces.link_info[i]);
static u8 revbyte(u8 b)
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 */
static void swap_bitfield(u8 *p, unsigned len)
	for (i = 0; i < len; i++) {
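/*
 * Worked example (illustration only): revbyte(0xd2) mirrors the bit order,
 * 0xd2 = 1101 0010b -> 0100 1011b = 0x4b; swap_bitfield() applies this to
 * each byte independently, which is exactly what the per-byte bit-field
 * allocation rule quoted above requires.
 */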
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) \
	(attr->size > (offsetof(struct perf_event_attr, f) + \
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) \
	if (bswap_safe(f, 0)) \
		attr->f = bswap_##sz(attr->f); \
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),

#undef bswap_field_64
#undef bswap_field_32
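/*
 * Why bswap_safe() checks attr->size (illustration): a perf.data written by
 * an older perf may carry a shorter perf_event_attr. If the recorded
 * attr->size ends before, say, aux_watermark, then bswap_safe(aux_watermark, 0)
 * evaluates false and the field, which does not exist in the file, is left
 * untouched instead of being swapped.
 */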
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
	event->tracing_data.size = bswap_32(event->tracing_data.size);

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
			pr_err("cpu_map swap: unsupported long size\n");

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
typedef void (*perf_event__swap_op)(union perf_event *event,

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
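/*
 * A NULL entry in this table (e.g. PERF_RECORD_HEADER_BUILD_ID) means the
 * record needs no field-wise byte swapping here; event_swap() further below
 * only calls the handler when one is present for the record type.
 */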
/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       cnt1 timestamps  |   cnt2 timestamps
 *             -          |         4   <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       cnt1 timestamps  |   cnt2 timestamps
 *             5          |         7   <---- max recorded
 *
 *       Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       cnt1 timestamps  |   cnt2 timestamps
 *
 *       Flush every event below timestamp 7
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR registers come in pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, given a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);

static void regs_dump__printf(u64 mask, u64 *regs)
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",

static inline const char *regs_dump_abi(struct regs_dump *d)
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)

	return regs_abi[d->abi];

static void regs__printf(const char *type, struct regs_dump *regs)
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);

static void regs_user__printf(struct perf_sample *sample)
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);

static void regs_intr__printf(struct perf_sample *sample)
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);

static void stack_user__printf(struct stack_dump *dump)
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
		printf("\n%#" PRIx64 " [%#x]: event: %d\n",
		       file_offset, event->header.size, event->header.type);

	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);

static void dump_read(struct evsel *evsel, union perf_event *event)
	struct perf_record_read *read_event = &event->read;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       perf_evsel__name(evsel),

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
	struct machine *machine;

	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;

		machine = machines__find(machines, pid);
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);

	return &machines->host;

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

		sample->period = v->value - sid->period;
		sid->period = v->value;

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],

 perf_evlist__deliver_sample(struct evlist *evlist,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct evsel *evsel,
			     struct machine *machine)
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,

	return deliver_sample_value(evlist, tool, event, sample,
				    &sample->read.one, machine);
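/*
 * Note on PERF_SAMPLE_READ delivery (summary of the helpers above): counter
 * values arrive as running totals, so deliver_sample_value() turns them into
 * per-sample periods by subtracting the previously seen total kept in
 * sid->period and then storing the new total; group mode simply repeats that
 * for every value in sample->read.group.
 */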
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
		++evlist->stats.nr_unknown_events;
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
	struct perf_sample sample;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
		pr_err("Can't parse sample, err = %d\n", ret);

	ret = auxtrace__process_event(session, event, &sample, tool);

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
			dump_event(session->evlist, event, file_offset, &sample);
int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);

static void event_swap(union perf_event *event, bool sample_id_all)
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
		swap(event, sample_id_all);
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
	union perf_event *event;
	size_t hdr_sz, rest;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;

	if (perf_data__is_pipe(session->data))

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;

		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,

		err = cb(session, event, offset, data);

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);
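/*
 * Usage sketch (hypothetical callback, not part of this file): a caller can
 * walk a byte range of the file without disturbing normal processing, e.g.
 *
 *	static int count_cb(struct perf_session *s __maybe_unused,
 *			    union perf_event *ev __maybe_unused,
 *			    u64 offset __maybe_unused, void *data)
 *	{
 *		(*(u64 *)data)++;
 *		return 0;
 *	}
 *	...
 *	u64 nr = 0;
 *	perf_session__peek_events(session, start, len, count_cb, &nr);
 */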
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)

		ret = perf_session__queue_event(session, event, timestamp, file_offset);

	return perf_session__deliver_event(session, event, tool, file_offset);
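/*
 * Summary of the dispatch above: user/synthesized records (>=
 * PERF_RECORD_USER_TYPE_START) are handled immediately, while kernel records
 * are either queued on the ordered_events machinery (when the tool asked for
 * timestamp ordering) or delivered straight away.
 */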
void perf_event_header__bswap(struct perf_event_header *hdr)
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
	return machine__findnew_thread(&session->machines.host, -1, pid);
/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
perf_session__warn_order(const struct perf_session *session)
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;

	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
static void perf_session__warn_about_errors(const struct perf_session *session)
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
	return thread_stack__flush(thread);

static int perf_session__flush_thread_stacks(struct perf_session *session)
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);
static int __perf_session__process_pipe_events(struct perf_session *session)
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;

	perf_tool__fill_defaults(tool);

	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);

	ordered_events__set_copy_on_queue(oe, true);

	err = readn(fd, event, sizeof(struct perf_event_header));
		pr_err("failed to read event header\n");

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");

	if (size > cur_size) {
		void *new = realloc(buf, size);
			pr_err("failed to allocate memory to read event\n");

	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
			pr_err("unexpected end of event stream\n");

			pr_err("failed to read event data\n");

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);

	err = __perf_session__process_decomp_events(session);

	if (!session_done())

	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);

	err = auxtrace__flush_events(session, tool);

	err = perf_session__flush_thread_stacks(session);

	perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)

	event = (union perf_event *)(buf + head);
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)

	/* We're not fetching the event so swap back again */
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
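/*
 * Note (summary of the helpers above): both wrappers bail out when an event
 * would cross the end of the mapped region; fetch_mmaped_event() reports that
 * as an error pointer (truncated or fuzzed file), while fetch_decomp_event()
 * returns NULL because a partial event at the tail of a decompressed buffer
 * is expected and gets completed by the next PERF_RECORD_COMPRESSED chunk.
 */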
static int __perf_session__process_decomp_events(struct perf_session *session)
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);

		decomp->head += size;
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

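/*
 * Note: NUM_MMAPS must be a power of two, since reader__process_events()
 * wraps map_idx with "& (ARRAY_SIZE(mmaps) - 1)".
 */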
typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

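/*
 * Default reader callback: hand each event straight to
 * perf_session__process_event().
 */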
static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

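/*
 * Record a kallsyms reference symbol (name and address) on the kernel map;
 * symbol loading can later compare it against the address it finds in the
 * perf.data file so that kernel relocation is accounted for.
 */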
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}

	return NULL;
}

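/*
 * Build a CPU bitmap from a user-supplied cpu list (the -C option),
 * first checking that the recorded events actually carry CPU
 * information (PERF_SAMPLE_CPU).
 */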
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
			fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, "  tid: %"PRI_ld64 "\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);