#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include "perf_regs.h"
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid &&
	    (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	if (!perf_evlist__valid_read_format(self->evlist)) {
		pr_err("non matching read_format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);
	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
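/*
 * Illustrative usage sketch (not part of the original file): a read-mode
 * caller typically pairs perf_session__new() with
 * perf_session__process_events() and perf_session__delete():
 *
 *	struct perf_session *s = perf_session__new("perf.data", O_RDONLY,
 *						    false, false, &tool);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &tool);
 *		perf_session__delete(s);
 *	}
 */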
static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}
static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->os_release);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}
void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	free(self);
}
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
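/*
 * Illustrative sketch: a tool only sets the callbacks it cares about and
 * lets perf_tool__fill_defaults() stub out the rest ("my_sample" is a
 * hypothetical handler, not defined in this file):
 *
 *	struct perf_tool tool = {
 *		.sample		 = my_sample,
 *		.ordered_samples = true,
 *	};
 *	perf_tool__fill_defaults(&tool);
 */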
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
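/*
 * Illustrative sketch: both helpers byte-swap the buffer in place, one
 * word at a time, e.g.:
 *
 *	u32 words[2] = { 0x11223344, 0x55667788 };
 *	mem_bswap_32(words, sizeof(words));
 *	// words[0] == 0x44332211, words[1] == 0x88776655
 */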
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}
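/*
 * When perf_event_attr.sample_id_all is set, non-sample records carry a
 * trailing block of u64-aligned sample fields (tid, time, id, ... depending
 * on sample_type). The record-specific swappers below therefore byte-swap
 * their fixed fields first and then hand everything past them to
 * swap_sample_id_all().
 */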
static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid	  = bswap_32(event->mmap.pid);
	event->mmap.tid	  = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len	  = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid	 = bswap_32(event->fork.pid);
	event->fork.tid	 = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and should carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
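/*
 * Illustrative sketch: revbyte() mirrors the bits of a single byte, so for
 * a one-byte field swap_bitfield() behaves like:
 *
 *	u8 flags = 0xc1;	// 1100 0001
 *	swap_bitfield(&flags, 1);
 *	// flags == 0x83	// 1000 0011
 */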
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};
static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);
static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	bool show_progress = limit == ULLONG_MAX;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (show_progress && (++idx >= progress_next)) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}
/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *      1              |         2
 *      2              |         3
 *      -              |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *      3              |         5
 *      4              |         6
 *      5              |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *      6              |         8
 *      7              |         9
 *      -              |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))
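/*
 * Queue nodes are recycled aggressively: perf_session_queue_event() first
 * reuses entries parked on os->sample_cache by flush_sample_queue(), then
 * carves nodes out of the current MAX_SAMPLE_BUFFER-sized chunk, and only
 * malloc()s a fresh chunk (tracked on os->to_free for
 * perf_session_free_sample_buffers()) once that chunk is exhausted.
 */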
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
	u64 i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}
static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}
static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}
static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}
static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
static int perf_session__process_event(struct perf_session *session,
					union perf_event *event,
					struct perf_tool *tool,
					u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}
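/*
 * bswap_16/bswap_32 are involutions, so applying this twice restores the
 * original header; fetch_mmaped_event() relies on that to "unswap" a
 * header it cannot consume yet.
 */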
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, 0, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL, *p;
	u64 head = 0;
	int skip = 0, err;

	perf_tool__fill_defaults(tool);

	cur_size = sizeof(union perf_event);
	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;
		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}
			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(self, tool);
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	err = 0;
	if (session_done())
		goto out_err;

	if (file_pos < file_size)
		goto more;

	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}
*session
, const char *msg
)
1422 struct perf_evsel
*evsel
;
1424 list_for_each_entry(evsel
, &session
->evlist
->entries
, node
) {
1425 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
)
1429 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg
);
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}

	return NULL;
}
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct addr_location al;
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	char s = print_oneline ? ' ' : '\t';

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (stack_depth) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					al.addr = node->ip;
					al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (!print_oneline)
				printf("\n");

			callchain_cursor_advance(&callchain_cursor);

			stack_depth--;
		}

	} else {
		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}
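/*
 * Illustrative sketch: callers pass a bitmap with room for MAX_NR_CPUS
 * bits, e.g. to honour a "-c 0,1"-style CPU list:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
 *	if (perf_session__cpu_bitmap(session, "0,1", cpu_bitmap) < 0)
 *		return -1;
 */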
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}