// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <sys/types.h>
#include "perf_regs.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "arch/common.h"
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);

	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}
215 static int process_event_synth_tracing_data_stub(struct perf_session
*session
217 union perf_event
*event
220 dump_printf(": unhandled!\n");
224 static int process_event_synth_attr_stub(struct perf_tool
*tool __maybe_unused
,
225 union perf_event
*event __maybe_unused
,
226 struct perf_evlist
**pevlist
229 dump_printf(": unhandled!\n");
233 static int process_event_synth_event_update_stub(struct perf_tool
*tool __maybe_unused
,
234 union perf_event
*event __maybe_unused
,
235 struct perf_evlist
**pevlist
239 perf_event__fprintf_event_update(event
, stdout
);
241 dump_printf(": unhandled!\n");
245 static int process_event_sample_stub(struct perf_tool
*tool __maybe_unused
,
246 union perf_event
*event __maybe_unused
,
247 struct perf_sample
*sample __maybe_unused
,
248 struct perf_evsel
*evsel __maybe_unused
,
249 struct machine
*machine __maybe_unused
)
251 dump_printf(": unhandled!\n");
255 static int process_event_stub(struct perf_tool
*tool __maybe_unused
,
256 union perf_event
*event __maybe_unused
,
257 struct perf_sample
*sample __maybe_unused
,
258 struct machine
*machine __maybe_unused
)
260 dump_printf(": unhandled!\n");
264 static int process_finished_round_stub(struct perf_tool
*tool __maybe_unused
,
265 union perf_event
*event __maybe_unused
,
266 struct ordered_events
*oe __maybe_unused
)
268 dump_printf(": unhandled!\n");
272 static int process_finished_round(struct perf_tool
*tool
,
273 union perf_event
*event
,
274 struct ordered_events
*oe
);
276 static int skipn(int fd
, off_t n
)
282 ret
= read(fd
, buf
, min(n
, (off_t
)sizeof(buf
)));
291 static s64
process_event_auxtrace_stub(struct perf_session
*session __maybe_unused
,
292 union perf_event
*event
)
294 dump_printf(": unhandled!\n");
295 if (perf_data__is_pipe(session
->data
))
296 skipn(perf_data__fd(session
->data
), event
->auxtrace
.size
);
297 return event
->auxtrace
.size
;
300 static int process_event_op2_stub(struct perf_session
*session __maybe_unused
,
301 union perf_event
*event __maybe_unused
)
303 dump_printf(": unhandled!\n");
309 int process_event_thread_map_stub(struct perf_session
*session __maybe_unused
,
310 union perf_event
*event __maybe_unused
)
313 perf_event__fprintf_thread_map(event
, stdout
);
315 dump_printf(": unhandled!\n");
320 int process_event_cpu_map_stub(struct perf_session
*session __maybe_unused
,
321 union perf_event
*event __maybe_unused
)
324 perf_event__fprintf_cpu_map(event
, stdout
);
326 dump_printf(": unhandled!\n");
331 int process_event_stat_config_stub(struct perf_session
*session __maybe_unused
,
332 union perf_event
*event __maybe_unused
)
335 perf_event__fprintf_stat_config(event
, stdout
);
337 dump_printf(": unhandled!\n");
341 static int process_stat_stub(struct perf_session
*perf_session __maybe_unused
,
342 union perf_event
*event
)
345 perf_event__fprintf_stat(event
, stdout
);
347 dump_printf(": unhandled!\n");
351 static int process_stat_round_stub(struct perf_session
*perf_session __maybe_unused
,
352 union perf_event
*event
)
355 perf_event__fprintf_stat_round(event
, stdout
);
357 dump_printf(": unhandled!\n");
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
}
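
/*
 * Illustrative sketch (editor's note, not part of the original file and kept
 * compiled out): a perf_tool user typically fills in only the callbacks it
 * cares about and lets perf_tool__fill_defaults() wire every remaining
 * pointer to the stubs above. The struct fields below exist in
 * struct perf_tool; the handler "my_sample_handler" is a hypothetical name.
 */
#if 0
static struct perf_tool example_tool = {
	.sample		= my_sample_handler,	/* custom sample handler */
	.ordered_events	= true,			/* request timestamp ordering */
	/* .mmap, .comm, .fork, ... are left NULL on purpose;       */
	/* perf_tool__fill_defaults() will point them at the stubs. */
};
#endif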
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
451 static void perf_event__comm_swap(union perf_event
*event
, bool sample_id_all
)
453 event
->comm
.pid
= bswap_32(event
->comm
.pid
);
454 event
->comm
.tid
= bswap_32(event
->comm
.tid
);
457 void *data
= &event
->comm
.comm
;
459 data
+= PERF_ALIGN(strlen(data
) + 1, sizeof(u64
));
460 swap_sample_id_all(event
, data
);
464 static void perf_event__mmap_swap(union perf_event
*event
,
467 event
->mmap
.pid
= bswap_32(event
->mmap
.pid
);
468 event
->mmap
.tid
= bswap_32(event
->mmap
.tid
);
469 event
->mmap
.start
= bswap_64(event
->mmap
.start
);
470 event
->mmap
.len
= bswap_64(event
->mmap
.len
);
471 event
->mmap
.pgoff
= bswap_64(event
->mmap
.pgoff
);
474 void *data
= &event
->mmap
.filename
;
476 data
+= PERF_ALIGN(strlen(data
) + 1, sizeof(u64
));
477 swap_sample_id_all(event
, data
);
481 static void perf_event__mmap2_swap(union perf_event
*event
,
484 event
->mmap2
.pid
= bswap_32(event
->mmap2
.pid
);
485 event
->mmap2
.tid
= bswap_32(event
->mmap2
.tid
);
486 event
->mmap2
.start
= bswap_64(event
->mmap2
.start
);
487 event
->mmap2
.len
= bswap_64(event
->mmap2
.len
);
488 event
->mmap2
.pgoff
= bswap_64(event
->mmap2
.pgoff
);
489 event
->mmap2
.maj
= bswap_32(event
->mmap2
.maj
);
490 event
->mmap2
.min
= bswap_32(event
->mmap2
.min
);
491 event
->mmap2
.ino
= bswap_64(event
->mmap2
.ino
);
494 void *data
= &event
->mmap2
.filename
;
496 data
+= PERF_ALIGN(strlen(data
) + 1, sizeof(u64
));
497 swap_sample_id_all(event
, data
);
500 static void perf_event__task_swap(union perf_event
*event
, bool sample_id_all
)
502 event
->fork
.pid
= bswap_32(event
->fork
.pid
);
503 event
->fork
.tid
= bswap_32(event
->fork
.tid
);
504 event
->fork
.ppid
= bswap_32(event
->fork
.ppid
);
505 event
->fork
.ptid
= bswap_32(event
->fork
.ptid
);
506 event
->fork
.time
= bswap_64(event
->fork
.time
);
509 swap_sample_id_all(event
, &event
->fork
+ 1);
512 static void perf_event__read_swap(union perf_event
*event
, bool sample_id_all
)
514 event
->read
.pid
= bswap_32(event
->read
.pid
);
515 event
->read
.tid
= bswap_32(event
->read
.tid
);
516 event
->read
.value
= bswap_64(event
->read
.value
);
517 event
->read
.time_enabled
= bswap_64(event
->read
.time_enabled
);
518 event
->read
.time_running
= bswap_64(event
->read
.time_running
);
519 event
->read
.id
= bswap_64(event
->read
.id
);
522 swap_sample_id_all(event
, &event
->read
+ 1);
525 static void perf_event__aux_swap(union perf_event
*event
, bool sample_id_all
)
527 event
->aux
.aux_offset
= bswap_64(event
->aux
.aux_offset
);
528 event
->aux
.aux_size
= bswap_64(event
->aux
.aux_size
);
529 event
->aux
.flags
= bswap_64(event
->aux
.flags
);
532 swap_sample_id_all(event
, &event
->aux
+ 1);
535 static void perf_event__itrace_start_swap(union perf_event
*event
,
538 event
->itrace_start
.pid
= bswap_32(event
->itrace_start
.pid
);
539 event
->itrace_start
.tid
= bswap_32(event
->itrace_start
.tid
);
542 swap_sample_id_all(event
, &event
->itrace_start
+ 1);
545 static void perf_event__switch_swap(union perf_event
*event
, bool sample_id_all
)
547 if (event
->header
.type
== PERF_RECORD_SWITCH_CPU_WIDE
) {
548 event
->context_switch
.next_prev_pid
=
549 bswap_32(event
->context_switch
.next_prev_pid
);
550 event
->context_switch
.next_prev_tid
=
551 bswap_32(event
->context_switch
.next_prev_tid
);
555 swap_sample_id_all(event
, &event
->context_switch
+ 1);
558 static void perf_event__throttle_swap(union perf_event
*event
,
561 event
->throttle
.time
= bswap_64(event
->throttle
.time
);
562 event
->throttle
.id
= bswap_64(event
->throttle
.id
);
563 event
->throttle
.stream_id
= bswap_64(event
->throttle
.stream_id
);
566 swap_sample_id_all(event
, &event
->throttle
+ 1);
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);

	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
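
/*
 * Worked example (editor's note, not in the original source): with the
 * revbyte() helper above, a flags byte 0x01 (only the least significant bit
 * set, e.g. attr->disabled on little-endian) becomes 0x80, i.e. the same
 * logical bit as seen from the other byte order:
 *
 *   revbyte(0x01) == 0x80, revbyte(0x80) == 0x01, revbyte(0xff) == 0xff
 *
 * Reversing every byte of the bitfield therefore moves each flag to the
 * position the other endianness expects, which is all that
 * perf_event__attr_swap() below needs.
 */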
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)
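
/*
 * Editor's note (illustrative, not in the original source): bswap_safe()
 * guards against perf.data files written by an older perf whose
 * perf_event_attr was smaller. For example, if attr->size predates the
 * sample_max_stack field, bswap_safe(sample_max_stack, 0) evaluates to
 * false and bswap_field_16(sample_max_stack) below leaves those bytes
 * untouched instead of swapping garbage past the recorded struct.
 */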
	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on a bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));

#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}
646 static void perf_event__hdr_attr_swap(union perf_event
*event
,
647 bool sample_id_all __maybe_unused
)
651 perf_event__attr_swap(&event
->attr
.attr
);
653 size
= event
->header
.size
;
654 size
-= (void *)&event
->attr
.id
- (void *)event
;
655 mem_bswap_64(event
->attr
.id
, size
);
658 static void perf_event__event_update_swap(union perf_event
*event
,
659 bool sample_id_all __maybe_unused
)
661 event
->event_update
.type
= bswap_64(event
->event_update
.type
);
662 event
->event_update
.id
= bswap_64(event
->event_update
.id
);
665 static void perf_event__event_type_swap(union perf_event
*event
,
666 bool sample_id_all __maybe_unused
)
668 event
->event_type
.event_type
.event_id
=
669 bswap_64(event
->event_type
.event_type
.event_id
);
672 static void perf_event__tracing_data_swap(union perf_event
*event
,
673 bool sample_id_all __maybe_unused
)
675 event
->tracing_data
.size
= bswap_32(event
->tracing_data
.size
);
678 static void perf_event__auxtrace_info_swap(union perf_event
*event
,
679 bool sample_id_all __maybe_unused
)
683 event
->auxtrace_info
.type
= bswap_32(event
->auxtrace_info
.type
);
685 size
= event
->header
.size
;
686 size
-= (void *)&event
->auxtrace_info
.priv
- (void *)event
;
687 mem_bswap_64(event
->auxtrace_info
.priv
, size
);
690 static void perf_event__auxtrace_swap(union perf_event
*event
,
691 bool sample_id_all __maybe_unused
)
693 event
->auxtrace
.size
= bswap_64(event
->auxtrace
.size
);
694 event
->auxtrace
.offset
= bswap_64(event
->auxtrace
.offset
);
695 event
->auxtrace
.reference
= bswap_64(event
->auxtrace
.reference
);
696 event
->auxtrace
.idx
= bswap_32(event
->auxtrace
.idx
);
697 event
->auxtrace
.tid
= bswap_32(event
->auxtrace
.tid
);
698 event
->auxtrace
.cpu
= bswap_32(event
->auxtrace
.cpu
);
701 static void perf_event__auxtrace_error_swap(union perf_event
*event
,
702 bool sample_id_all __maybe_unused
)
704 event
->auxtrace_error
.type
= bswap_32(event
->auxtrace_error
.type
);
705 event
->auxtrace_error
.code
= bswap_32(event
->auxtrace_error
.code
);
706 event
->auxtrace_error
.cpu
= bswap_32(event
->auxtrace_error
.cpu
);
707 event
->auxtrace_error
.pid
= bswap_32(event
->auxtrace_error
.pid
);
708 event
->auxtrace_error
.tid
= bswap_32(event
->auxtrace_error
.tid
);
709 event
->auxtrace_error
.fmt
= bswap_32(event
->auxtrace_error
.fmt
);
710 event
->auxtrace_error
.ip
= bswap_64(event
->auxtrace_error
.ip
);
711 if (event
->auxtrace_error
.fmt
)
712 event
->auxtrace_error
.time
= bswap_64(event
->auxtrace_error
.time
);
715 static void perf_event__thread_map_swap(union perf_event
*event
,
716 bool sample_id_all __maybe_unused
)
720 event
->thread_map
.nr
= bswap_64(event
->thread_map
.nr
);
722 for (i
= 0; i
< event
->thread_map
.nr
; i
++)
723 event
->thread_map
.entries
[i
].pid
= bswap_64(event
->thread_map
.entries
[i
].pid
);
726 static void perf_event__cpu_map_swap(union perf_event
*event
,
727 bool sample_id_all __maybe_unused
)
729 struct cpu_map_data
*data
= &event
->cpu_map
.data
;
730 struct cpu_map_entries
*cpus
;
731 struct cpu_map_mask
*mask
;
734 data
->type
= bswap_64(data
->type
);
736 switch (data
->type
) {
737 case PERF_CPU_MAP__CPUS
:
738 cpus
= (struct cpu_map_entries
*)data
->data
;
740 cpus
->nr
= bswap_16(cpus
->nr
);
742 for (i
= 0; i
< cpus
->nr
; i
++)
743 cpus
->cpu
[i
] = bswap_16(cpus
->cpu
[i
]);
745 case PERF_CPU_MAP__MASK
:
746 mask
= (struct cpu_map_mask
*) data
->data
;
748 mask
->nr
= bswap_16(mask
->nr
);
749 mask
->long_size
= bswap_16(mask
->long_size
);
751 switch (mask
->long_size
) {
752 case 4: mem_bswap_32(&mask
->mask
, mask
->nr
); break;
753 case 8: mem_bswap_64(&mask
->mask
, mask
->nr
); break;
755 pr_err("cpu_map swap: unsupported long size\n");
762 static void perf_event__stat_config_swap(union perf_event
*event
,
763 bool sample_id_all __maybe_unused
)
767 size
= event
->stat_config
.nr
* sizeof(event
->stat_config
.data
[0]);
768 size
+= 1; /* nr item itself */
769 mem_bswap_64(&event
->stat_config
.nr
, size
);
772 static void perf_event__stat_swap(union perf_event
*event
,
773 bool sample_id_all __maybe_unused
)
775 event
->stat
.id
= bswap_64(event
->stat
.id
);
776 event
->stat
.thread
= bswap_32(event
->stat
.thread
);
777 event
->stat
.cpu
= bswap_32(event
->stat
.cpu
);
778 event
->stat
.val
= bswap_64(event
->stat
.val
);
779 event
->stat
.ena
= bswap_64(event
->stat
.ena
);
780 event
->stat
.run
= bswap_64(event
->stat
.run
);
783 static void perf_event__stat_round_swap(union perf_event
*event
,
784 bool sample_id_all __maybe_unused
)
786 event
->stat_round
.type
= bswap_64(event
->stat_round
.type
);
787 event
->stat_round
.time
= bswap_64(event
->stat_round
.time
);
790 typedef void (*perf_event__swap_op
)(union perf_event
*event
,
793 static perf_event__swap_op perf_event__swap_ops
[] = {
794 [PERF_RECORD_MMAP
] = perf_event__mmap_swap
,
795 [PERF_RECORD_MMAP2
] = perf_event__mmap2_swap
,
796 [PERF_RECORD_COMM
] = perf_event__comm_swap
,
797 [PERF_RECORD_FORK
] = perf_event__task_swap
,
798 [PERF_RECORD_EXIT
] = perf_event__task_swap
,
799 [PERF_RECORD_LOST
] = perf_event__all64_swap
,
800 [PERF_RECORD_READ
] = perf_event__read_swap
,
801 [PERF_RECORD_THROTTLE
] = perf_event__throttle_swap
,
802 [PERF_RECORD_UNTHROTTLE
] = perf_event__throttle_swap
,
803 [PERF_RECORD_SAMPLE
] = perf_event__all64_swap
,
804 [PERF_RECORD_AUX
] = perf_event__aux_swap
,
805 [PERF_RECORD_ITRACE_START
] = perf_event__itrace_start_swap
,
806 [PERF_RECORD_LOST_SAMPLES
] = perf_event__all64_swap
,
807 [PERF_RECORD_SWITCH
] = perf_event__switch_swap
,
808 [PERF_RECORD_SWITCH_CPU_WIDE
] = perf_event__switch_swap
,
809 [PERF_RECORD_HEADER_ATTR
] = perf_event__hdr_attr_swap
,
810 [PERF_RECORD_HEADER_EVENT_TYPE
] = perf_event__event_type_swap
,
811 [PERF_RECORD_HEADER_TRACING_DATA
] = perf_event__tracing_data_swap
,
812 [PERF_RECORD_HEADER_BUILD_ID
] = NULL
,
813 [PERF_RECORD_ID_INDEX
] = perf_event__all64_swap
,
814 [PERF_RECORD_AUXTRACE_INFO
] = perf_event__auxtrace_info_swap
,
815 [PERF_RECORD_AUXTRACE
] = perf_event__auxtrace_swap
,
816 [PERF_RECORD_AUXTRACE_ERROR
] = perf_event__auxtrace_error_swap
,
817 [PERF_RECORD_THREAD_MAP
] = perf_event__thread_map_swap
,
818 [PERF_RECORD_CPU_MAP
] = perf_event__cpu_map_swap
,
819 [PERF_RECORD_STAT_CONFIG
] = perf_event__stat_config_swap
,
820 [PERF_RECORD_STAT
] = perf_event__stat_swap
,
821 [PERF_RECORD_STAT_ROUND
] = perf_event__stat_round_swap
,
822 [PERF_RECORD_EVENT_UPDATE
] = perf_event__event_update_swap
,
823 [PERF_RECORD_TIME_CONV
] = perf_event__all64_swap
,
824 [PERF_RECORD_HEADER_MAX
] = NULL
,
/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
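
/*
 * Editor's note (illustrative, not in the original source): the two pieces
 * above implement the round scheme from the comment. While processing a
 * file with tool->ordered_events set, each sample is buffered via
 * perf_session__queue_event(session, event, timestamp, file_offset); when a
 * PERF_RECORD_FINISHED_ROUND is met, process_finished_round() calls
 * ordered_events__flush(oe, OE_FLUSH__ROUND), which emits, in timestamp
 * order, only the events known to be complete (those no newer than the max
 * timestamp recorded by the previous round).
 */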
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain;
		 * i is the kernel call chain length,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR registers come in pairs: the caller is stored in the
		 * "from" register, while the callee is stored in the "to"
		 * register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}
928 static void callchain__printf(struct perf_evsel
*evsel
,
929 struct perf_sample
*sample
)
932 struct ip_callchain
*callchain
= sample
->callchain
;
934 if (perf_evsel__has_branch_callstack(evsel
))
935 callchain__lbr_callstack_printf(sample
);
937 printf("... FP chain: nr:%" PRIu64
"\n", callchain
->nr
);
939 for (i
= 0; i
< callchain
->nr
; i
++)
940 printf("..... %2d: %016" PRIx64
"\n",
941 i
, callchain
->ips
[i
]);
944 static void branch_stack__printf(struct perf_sample
*sample
)
948 printf("... branch stack: nr:%" PRIu64
"\n", sample
->branch_stack
->nr
);
950 for (i
= 0; i
< sample
->branch_stack
->nr
; i
++) {
951 struct branch_entry
*e
= &sample
->branch_stack
->entries
[i
];
953 printf("..... %2"PRIu64
": %016" PRIx64
" -> %016" PRIx64
" %hu cycles %s%s%s%s %x\n",
955 (unsigned short)e
->flags
.cycles
,
956 e
->flags
.mispred
? "M" : " ",
957 e
->flags
.predicted
? "P" : " ",
958 e
->flags
.abort
? "A" : " ",
959 e
->flags
.in_tx
? "T" : " ",
960 (unsigned)e
->flags
.reserved
);
964 static void regs_dump__printf(u64 mask
, u64
*regs
)
968 for_each_set_bit(rid
, (unsigned long *) &mask
, sizeof(mask
) * 8) {
971 printf(".... %-5s 0x%" PRIx64
"\n",
972 perf_reg_name(rid
), val
);
976 static const char *regs_abi
[] = {
977 [PERF_SAMPLE_REGS_ABI_NONE
] = "none",
978 [PERF_SAMPLE_REGS_ABI_32
] = "32-bit",
979 [PERF_SAMPLE_REGS_ABI_64
] = "64-bit",
982 static inline const char *regs_dump_abi(struct regs_dump
*d
)
984 if (d
->abi
> PERF_SAMPLE_REGS_ABI_64
)
987 return regs_abi
[d
->abi
];
990 static void regs__printf(const char *type
, struct regs_dump
*regs
)
992 u64 mask
= regs
->mask
;
994 printf("... %s regs: mask 0x%" PRIx64
" ABI %s\n",
997 regs_dump_abi(regs
));
999 regs_dump__printf(mask
, regs
->regs
);
1002 static void regs_user__printf(struct perf_sample
*sample
)
1004 struct regs_dump
*user_regs
= &sample
->user_regs
;
1006 if (user_regs
->regs
)
1007 regs__printf("user", user_regs
);
1010 static void regs_intr__printf(struct perf_sample
*sample
)
1012 struct regs_dump
*intr_regs
= &sample
->intr_regs
;
1014 if (intr_regs
->regs
)
1015 regs__printf("intr", intr_regs
);
1018 static void stack_user__printf(struct stack_dump
*dump
)
1020 printf("... ustack: size %" PRIu64
", offset 0x%x\n",
1021 dump
->size
, dump
->offset
);
1024 static void perf_evlist__print_tstamp(struct perf_evlist
*evlist
,
1025 union perf_event
*event
,
1026 struct perf_sample
*sample
)
1028 u64 sample_type
= __perf_evlist__combined_sample_type(evlist
);
1030 if (event
->header
.type
!= PERF_RECORD_SAMPLE
&&
1031 !perf_evlist__sample_id_all(evlist
)) {
1032 fputs("-1 -1 ", stdout
);
1036 if ((sample_type
& PERF_SAMPLE_CPU
))
1037 printf("%u ", sample
->cpu
);
1039 if (sample_type
& PERF_SAMPLE_TIME
)
1040 printf("%" PRIu64
" ", sample
->time
);
1043 static void sample_read__printf(struct perf_sample
*sample
, u64 read_format
)
1045 printf("... sample_read:\n");
1047 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
1048 printf("...... time enabled %016" PRIx64
"\n",
1049 sample
->read
.time_enabled
);
1051 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
1052 printf("...... time running %016" PRIx64
"\n",
1053 sample
->read
.time_running
);
1055 if (read_format
& PERF_FORMAT_GROUP
) {
1058 printf(".... group nr %" PRIu64
"\n", sample
->read
.group
.nr
);
1060 for (i
= 0; i
< sample
->read
.group
.nr
; i
++) {
1061 struct sample_read_value
*value
;
1063 value
= &sample
->read
.group
.values
[i
];
1064 printf("..... id %016" PRIx64
1065 ", value %016" PRIx64
"\n",
1066 value
->id
, value
->value
);
1069 printf("..... id %016" PRIx64
", value %016" PRIx64
"\n",
1070 sample
->read
.one
.id
, sample
->read
.one
.value
);
1073 static void dump_event(struct perf_evlist
*evlist
, union perf_event
*event
,
1074 u64 file_offset
, struct perf_sample
*sample
)
1079 printf("\n%#" PRIx64
" [%#x]: event: %d\n",
1080 file_offset
, event
->header
.size
, event
->header
.type
);
1083 if (event
->header
.type
== PERF_RECORD_SAMPLE
&& evlist
->trace_event_sample_raw
)
1084 evlist
->trace_event_sample_raw(evlist
, event
, sample
);
1087 perf_evlist__print_tstamp(evlist
, event
, sample
);
1089 printf("%#" PRIx64
" [%#x]: PERF_RECORD_%s", file_offset
,
1090 event
->header
.size
, perf_event__name(event
->header
.type
));
1093 static void dump_sample(struct perf_evsel
*evsel
, union perf_event
*event
,
1094 struct perf_sample
*sample
)
1101 printf("(IP, 0x%x): %d/%d: %#" PRIx64
" period: %" PRIu64
" addr: %#" PRIx64
"\n",
1102 event
->header
.misc
, sample
->pid
, sample
->tid
, sample
->ip
,
1103 sample
->period
, sample
->addr
);
1105 sample_type
= evsel
->attr
.sample_type
;
1107 if (evsel__has_callchain(evsel
))
1108 callchain__printf(evsel
, sample
);
1110 if ((sample_type
& PERF_SAMPLE_BRANCH_STACK
) && !perf_evsel__has_branch_callstack(evsel
))
1111 branch_stack__printf(sample
);
1113 if (sample_type
& PERF_SAMPLE_REGS_USER
)
1114 regs_user__printf(sample
);
1116 if (sample_type
& PERF_SAMPLE_REGS_INTR
)
1117 regs_intr__printf(sample
);
1119 if (sample_type
& PERF_SAMPLE_STACK_USER
)
1120 stack_user__printf(&sample
->user_stack
);
1122 if (sample_type
& PERF_SAMPLE_WEIGHT
)
1123 printf("... weight: %" PRIu64
"\n", sample
->weight
);
1125 if (sample_type
& PERF_SAMPLE_DATA_SRC
)
1126 printf(" . data_src: 0x%"PRIx64
"\n", sample
->data_src
);
1128 if (sample_type
& PERF_SAMPLE_PHYS_ADDR
)
1129 printf(" .. phys_addr: 0x%"PRIx64
"\n", sample
->phys_addr
);
1131 if (sample_type
& PERF_SAMPLE_TRANSACTION
)
1132 printf("... transaction: %" PRIx64
"\n", sample
->transaction
);
1134 if (sample_type
& PERF_SAMPLE_READ
)
1135 sample_read__printf(sample
, evsel
->attr
.read_format
);
1138 static void dump_read(struct perf_evsel
*evsel
, union perf_event
*event
)
1140 struct read_event
*read_event
= &event
->read
;
1146 printf(": %d %d %s %" PRIu64
"\n", event
->read
.pid
, event
->read
.tid
,
1147 evsel
? perf_evsel__name(evsel
) : "FAIL",
1150 read_format
= evsel
->attr
.read_format
;
1152 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
1153 printf("... time enabled : %" PRIu64
"\n", read_event
->time_enabled
);
1155 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
1156 printf("... time running : %" PRIu64
"\n", read_event
->time_running
);
1158 if (read_format
& PERF_FORMAT_ID
)
1159 printf("... id : %" PRIu64
"\n", read_event
->id
);
1162 static struct machine
*machines__find_for_cpumode(struct machines
*machines
,
1163 union perf_event
*event
,
1164 struct perf_sample
*sample
)
1166 struct machine
*machine
;
1169 ((sample
->cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
) ||
1170 (sample
->cpumode
== PERF_RECORD_MISC_GUEST_USER
))) {
1173 if (event
->header
.type
== PERF_RECORD_MMAP
1174 || event
->header
.type
== PERF_RECORD_MMAP2
)
1175 pid
= event
->mmap
.pid
;
1179 machine
= machines__find(machines
, pid
);
1181 machine
= machines__findnew(machines
, DEFAULT_GUEST_KERNEL_ID
);
1185 return &machines
->host
;
static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample
	 * with a zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
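
/*
 * Editor's note (illustrative, not in the original source): with
 * PERF_SAMPLE_READ the kernel reports cumulative counter values, so the
 * subtraction above turns them into per-sample periods. E.g. if an id's
 * read value is 1000 in one sample and 1600 in the next, the second
 * delivery gets sample->period == 600; a delta of 0 is dropped by the
 * zero-period check.
 */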
1218 static int deliver_sample_group(struct perf_evlist
*evlist
,
1219 struct perf_tool
*tool
,
1220 union perf_event
*event
,
1221 struct perf_sample
*sample
,
1222 struct machine
*machine
)
1227 for (i
= 0; i
< sample
->read
.group
.nr
; i
++) {
1228 ret
= deliver_sample_value(evlist
, tool
, event
, sample
,
1229 &sample
->read
.group
.values
[i
],
static int perf_evlist__deliver_sample(struct perf_evlist *evlist,
				       struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_evsel *evsel,
				       struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}
1263 static int machines__deliver_event(struct machines
*machines
,
1264 struct perf_evlist
*evlist
,
1265 union perf_event
*event
,
1266 struct perf_sample
*sample
,
1267 struct perf_tool
*tool
, u64 file_offset
)
1269 struct perf_evsel
*evsel
;
1270 struct machine
*machine
;
1272 dump_event(evlist
, event
, file_offset
, sample
);
1274 evsel
= perf_evlist__id2evsel(evlist
, sample
->id
);
1276 machine
= machines__find_for_cpumode(machines
, event
, sample
);
1278 switch (event
->header
.type
) {
1279 case PERF_RECORD_SAMPLE
:
1280 if (evsel
== NULL
) {
1281 ++evlist
->stats
.nr_unknown_id
;
1284 dump_sample(evsel
, event
, sample
);
1285 if (machine
== NULL
) {
1286 ++evlist
->stats
.nr_unprocessable_samples
;
1289 return perf_evlist__deliver_sample(evlist
, tool
, event
, sample
, evsel
, machine
);
1290 case PERF_RECORD_MMAP
:
1291 return tool
->mmap(tool
, event
, sample
, machine
);
1292 case PERF_RECORD_MMAP2
:
1293 if (event
->header
.misc
& PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT
)
1294 ++evlist
->stats
.nr_proc_map_timeout
;
1295 return tool
->mmap2(tool
, event
, sample
, machine
);
1296 case PERF_RECORD_COMM
:
1297 return tool
->comm(tool
, event
, sample
, machine
);
1298 case PERF_RECORD_NAMESPACES
:
1299 return tool
->namespaces(tool
, event
, sample
, machine
);
1300 case PERF_RECORD_FORK
:
1301 return tool
->fork(tool
, event
, sample
, machine
);
1302 case PERF_RECORD_EXIT
:
1303 return tool
->exit(tool
, event
, sample
, machine
);
1304 case PERF_RECORD_LOST
:
1305 if (tool
->lost
== perf_event__process_lost
)
1306 evlist
->stats
.total_lost
+= event
->lost
.lost
;
1307 return tool
->lost(tool
, event
, sample
, machine
);
1308 case PERF_RECORD_LOST_SAMPLES
:
1309 if (tool
->lost_samples
== perf_event__process_lost_samples
)
1310 evlist
->stats
.total_lost_samples
+= event
->lost_samples
.lost
;
1311 return tool
->lost_samples(tool
, event
, sample
, machine
);
1312 case PERF_RECORD_READ
:
1313 dump_read(evsel
, event
);
1314 return tool
->read(tool
, event
, sample
, evsel
, machine
);
1315 case PERF_RECORD_THROTTLE
:
1316 return tool
->throttle(tool
, event
, sample
, machine
);
1317 case PERF_RECORD_UNTHROTTLE
:
1318 return tool
->unthrottle(tool
, event
, sample
, machine
);
1319 case PERF_RECORD_AUX
:
1320 if (tool
->aux
== perf_event__process_aux
) {
1321 if (event
->aux
.flags
& PERF_AUX_FLAG_TRUNCATED
)
1322 evlist
->stats
.total_aux_lost
+= 1;
1323 if (event
->aux
.flags
& PERF_AUX_FLAG_PARTIAL
)
1324 evlist
->stats
.total_aux_partial
+= 1;
1326 return tool
->aux(tool
, event
, sample
, machine
);
1327 case PERF_RECORD_ITRACE_START
:
1328 return tool
->itrace_start(tool
, event
, sample
, machine
);
1329 case PERF_RECORD_SWITCH
:
1330 case PERF_RECORD_SWITCH_CPU_WIDE
:
1331 return tool
->context_switch(tool
, event
, sample
, machine
);
1332 case PERF_RECORD_KSYMBOL
:
1333 return tool
->ksymbol(tool
, event
, sample
, machine
);
1334 case PERF_RECORD_BPF_EVENT
:
1335 return tool
->bpf_event(tool
, event
, sample
, machine
);
1337 ++evlist
->stats
.nr_unknown_events
;
1342 static int perf_session__deliver_event(struct perf_session
*session
,
1343 union perf_event
*event
,
1344 struct perf_tool
*tool
,
1347 struct perf_sample sample
;
1350 ret
= perf_evlist__parse_sample(session
->evlist
, event
, &sample
);
1352 pr_err("Can't parse sample, err = %d\n", ret
);
1356 ret
= auxtrace__process_event(session
, event
, &sample
, tool
);
1362 return machines__deliver_event(&session
->machines
, session
->evlist
,
1363 event
, &sample
, tool
, file_offset
);
1366 static s64
perf_session__process_user_event(struct perf_session
*session
,
1367 union perf_event
*event
,
1370 struct ordered_events
*oe
= &session
->ordered_events
;
1371 struct perf_tool
*tool
= session
->tool
;
1372 struct perf_sample sample
= { .time
= 0, };
1373 int fd
= perf_data__fd(session
->data
);
1376 dump_event(session
->evlist
, event
, file_offset
, &sample
);
1378 /* These events are processed right away */
1379 switch (event
->header
.type
) {
1380 case PERF_RECORD_HEADER_ATTR
:
1381 err
= tool
->attr(tool
, event
, &session
->evlist
);
1383 perf_session__set_id_hdr_size(session
);
1384 perf_session__set_comm_exec(session
);
1387 case PERF_RECORD_EVENT_UPDATE
:
1388 return tool
->event_update(tool
, event
, &session
->evlist
);
1389 case PERF_RECORD_HEADER_EVENT_TYPE
:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
1395 case PERF_RECORD_HEADER_TRACING_DATA
:
1396 /* setup for reading amidst mmap */
1397 lseek(fd
, file_offset
, SEEK_SET
);
1398 return tool
->tracing_data(session
, event
);
1399 case PERF_RECORD_HEADER_BUILD_ID
:
1400 return tool
->build_id(session
, event
);
1401 case PERF_RECORD_FINISHED_ROUND
:
1402 return tool
->finished_round(tool
, event
, oe
);
1403 case PERF_RECORD_ID_INDEX
:
1404 return tool
->id_index(session
, event
);
1405 case PERF_RECORD_AUXTRACE_INFO
:
1406 return tool
->auxtrace_info(session
, event
);
1407 case PERF_RECORD_AUXTRACE
:
1408 /* setup for reading amidst mmap */
1409 lseek(fd
, file_offset
+ event
->header
.size
, SEEK_SET
);
1410 return tool
->auxtrace(session
, event
);
1411 case PERF_RECORD_AUXTRACE_ERROR
:
1412 perf_session__auxtrace_error_inc(session
, event
);
1413 return tool
->auxtrace_error(session
, event
);
1414 case PERF_RECORD_THREAD_MAP
:
1415 return tool
->thread_map(session
, event
);
1416 case PERF_RECORD_CPU_MAP
:
1417 return tool
->cpu_map(session
, event
);
1418 case PERF_RECORD_STAT_CONFIG
:
1419 return tool
->stat_config(session
, event
);
1420 case PERF_RECORD_STAT
:
1421 return tool
->stat(session
, event
);
1422 case PERF_RECORD_STAT_ROUND
:
1423 return tool
->stat_round(session
, event
);
1424 case PERF_RECORD_TIME_CONV
:
1425 session
->time_conv
= event
->time_conv
;
1426 return tool
->time_conv(session
, event
);
1427 case PERF_RECORD_HEADER_FEATURE
:
1428 return tool
->feature(session
, event
);
int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}
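
/*
 * Editor's note (not in the original source): unlike events read from the
 * perf.data file, events handed to perf_session__deliver_synth_event()
 * (for example records synthesized by the tools themselves) bypass the
 * ordered-events queue entirely and are delivered immediately, with a file
 * offset of 0.
 */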
1449 static void event_swap(union perf_event
*event
, bool sample_id_all
)
1451 perf_event__swap_op swap
;
1453 swap
= perf_event__swap_ops
[event
->header
.type
];
1455 swap(event
, sample_id_all
);
1458 int perf_session__peek_event(struct perf_session
*session
, off_t file_offset
,
1459 void *buf
, size_t buf_sz
,
1460 union perf_event
**event_ptr
,
1461 struct perf_sample
*sample
)
1463 union perf_event
*event
;
1464 size_t hdr_sz
, rest
;
1467 if (session
->one_mmap
&& !session
->header
.needs_swap
) {
1468 event
= file_offset
- session
->one_mmap_offset
+
1469 session
->one_mmap_addr
;
1470 goto out_parse_sample
;
1473 if (perf_data__is_pipe(session
->data
))
1476 fd
= perf_data__fd(session
->data
);
1477 hdr_sz
= sizeof(struct perf_event_header
);
1479 if (buf_sz
< hdr_sz
)
1482 if (lseek(fd
, file_offset
, SEEK_SET
) == (off_t
)-1 ||
1483 readn(fd
, buf
, hdr_sz
) != (ssize_t
)hdr_sz
)
1486 event
= (union perf_event
*)buf
;
1488 if (session
->header
.needs_swap
)
1489 perf_event_header__bswap(&event
->header
);
1491 if (event
->header
.size
< hdr_sz
|| event
->header
.size
> buf_sz
)
1494 rest
= event
->header
.size
- hdr_sz
;
1496 if (readn(fd
, buf
, rest
) != (ssize_t
)rest
)
1499 if (session
->header
.needs_swap
)
1500 event_swap(event
, perf_evlist__sample_id_all(session
->evlist
));
1504 if (sample
&& event
->header
.type
< PERF_RECORD_USER_TYPE_START
&&
1505 perf_evlist__parse_sample(session
->evlist
, event
, sample
))
1513 static s64
perf_session__process_event(struct perf_session
*session
,
1514 union perf_event
*event
, u64 file_offset
)
1516 struct perf_evlist
*evlist
= session
->evlist
;
1517 struct perf_tool
*tool
= session
->tool
;
1520 if (session
->header
.needs_swap
)
1521 event_swap(event
, perf_evlist__sample_id_all(evlist
));
1523 if (event
->header
.type
>= PERF_RECORD_HEADER_MAX
)
1526 events_stats__inc(&evlist
->stats
, event
->header
.type
);
1528 if (event
->header
.type
>= PERF_RECORD_USER_TYPE_START
)
1529 return perf_session__process_user_event(session
, event
, file_offset
);
1531 if (tool
->ordered_events
) {
1532 u64 timestamp
= -1ULL;
1534 ret
= perf_evlist__parse_sample_timestamp(evlist
, event
, ×tamp
);
1535 if (ret
&& ret
!= -1)
1538 ret
= perf_session__queue_event(session
, event
, timestamp
, file_offset
);
1543 return perf_session__deliver_event(session
, event
, tool
, file_offset
);
1546 void perf_event_header__bswap(struct perf_event_header
*hdr
)
1548 hdr
->type
= bswap_32(hdr
->type
);
1549 hdr
->misc
= bswap_16(hdr
->misc
);
1550 hdr
->size
= bswap_16(hdr
->size
);
1553 struct thread
*perf_session__findnew(struct perf_session
*session
, pid_t pid
)
1555 return machine__findnew_thread(&session
->machines
.host
, -1, pid
);
/*
 * Threads are identified by pid and tid, and the idle task has
 * pid == tid == 0. A single thread is created here to represent it, but in
 * reality there is a separate idle task per cpu, so there should be one
 * 'struct thread' per cpu rather than just one. That causes problems for
 * some tools, requiring workarounds. For example get_idle_thread() in
 * builtin-sched.c, or thread_stack__per_cpu().
 */
1565 int perf_session__register_idle_thread(struct perf_session
*session
)
1567 struct thread
*thread
;
1570 thread
= machine__findnew_thread(&session
->machines
.host
, 0, 0);
1571 if (thread
== NULL
|| thread__set_comm(thread
, "swapper", 0)) {
1572 pr_err("problem inserting idle task.\n");
1576 if (thread
== NULL
|| thread__set_namespaces(thread
, 0, NULL
)) {
1577 pr_err("problem inserting idle task.\n");
1581 /* machine__findnew_thread() got the thread, so put it */
1582 thread__put(thread
);
1587 perf_session__warn_order(const struct perf_session
*session
)
1589 const struct ordered_events
*oe
= &session
->ordered_events
;
1590 struct perf_evsel
*evsel
;
1591 bool should_warn
= true;
1593 evlist__for_each_entry(session
->evlist
, evsel
) {
1594 if (evsel
->attr
.write_backward
)
1595 should_warn
= false;
1600 if (oe
->nr_unordered_events
!= 0)
1601 ui__warning("%u out of order events recorded.\n", oe
->nr_unordered_events
);
1604 static void perf_session__warn_about_errors(const struct perf_session
*session
)
1606 const struct events_stats
*stats
= &session
->evlist
->stats
;
1608 if (session
->tool
->lost
== perf_event__process_lost
&&
1609 stats
->nr_events
[PERF_RECORD_LOST
] != 0) {
1610 ui__warning("Processed %d events and lost %d chunks!\n\n"
1611 "Check IO/CPU overload!\n\n",
1612 stats
->nr_events
[0],
1613 stats
->nr_events
[PERF_RECORD_LOST
]);
1616 if (session
->tool
->lost_samples
== perf_event__process_lost_samples
) {
1619 drop_rate
= (double)stats
->total_lost_samples
/
1620 (double) (stats
->nr_events
[PERF_RECORD_SAMPLE
] + stats
->total_lost_samples
);
1621 if (drop_rate
> 0.05) {
1622 ui__warning("Processed %" PRIu64
" samples and lost %3.2f%%!\n\n",
1623 stats
->nr_events
[PERF_RECORD_SAMPLE
] + stats
->total_lost_samples
,
1628 if (session
->tool
->aux
== perf_event__process_aux
&&
1629 stats
->total_aux_lost
!= 0) {
1630 ui__warning("AUX data lost %" PRIu64
" times out of %u!\n\n",
1631 stats
->total_aux_lost
,
1632 stats
->nr_events
[PERF_RECORD_AUX
]);
1635 if (session
->tool
->aux
== perf_event__process_aux
&&
1636 stats
->total_aux_partial
!= 0) {
1637 bool vmm_exclusive
= false;
1639 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1642 ui__warning("AUX data had gaps in it %" PRIu64
" times out of %u!\n\n"
1643 "Are you running a KVM guest in the background?%s\n\n",
1644 stats
->total_aux_partial
,
1645 stats
->nr_events
[PERF_RECORD_AUX
],
1647 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1648 "will reduce the gaps to only guest's timeslices." :
1652 if (stats
->nr_unknown_events
!= 0) {
1653 ui__warning("Found %u unknown events!\n\n"
1654 "Is this an older tool processing a perf.data "
1655 "file generated by a more recent tool?\n\n"
1656 "If that is not the case, consider "
1657 "reporting to linux-kernel@vger.kernel.org.\n\n",
1658 stats
->nr_unknown_events
);
1661 if (stats
->nr_unknown_id
!= 0) {
1662 ui__warning("%u samples with id not present in the header\n",
1663 stats
->nr_unknown_id
);
1666 if (stats
->nr_invalid_chains
!= 0) {
1667 ui__warning("Found invalid callchains!\n\n"
1668 "%u out of %u events were discarded for this reason.\n\n"
1669 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1670 stats
->nr_invalid_chains
,
1671 stats
->nr_events
[PERF_RECORD_SAMPLE
]);
1674 if (stats
->nr_unprocessable_samples
!= 0) {
1675 ui__warning("%u unprocessable samples recorded.\n"
1676 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1677 stats
->nr_unprocessable_samples
);
1680 perf_session__warn_order(session
);
1682 events_stats__auxtrace_error_warn(stats
);
1684 if (stats
->nr_proc_map_timeout
!= 0) {
1685 ui__warning("%d map information files for pre-existing threads were\n"
1686 "not processed, if there are samples for addresses they\n"
1687 "will not be resolved, you may find out which are these\n"
1688 "threads by running with -v and redirecting the output\n"
1690 "The time limit to process proc map is too short?\n"
1691 "Increase it by --proc-map-timeout\n",
1692 stats
->nr_proc_map_timeout
);
1696 static int perf_session__flush_thread_stack(struct thread
*thread
,
1697 void *p __maybe_unused
)
1699 return thread_stack__flush(thread
);
1702 static int perf_session__flush_thread_stacks(struct perf_session
*session
)
1704 return machines__for_each_thread(&session
->machines
,
1705 perf_session__flush_thread_stack
,
1709 volatile int session_done
;
1711 static int __perf_session__process_pipe_events(struct perf_session
*session
)
1713 struct ordered_events
*oe
= &session
->ordered_events
;
1714 struct perf_tool
*tool
= session
->tool
;
1715 int fd
= perf_data__fd(session
->data
);
1716 union perf_event
*event
;
1717 uint32_t size
, cur_size
= 0;
1724 perf_tool__fill_defaults(tool
);
1727 cur_size
= sizeof(union perf_event
);
1729 buf
= malloc(cur_size
);
1732 ordered_events__set_copy_on_queue(oe
, true);
1735 err
= readn(fd
, event
, sizeof(struct perf_event_header
));
1740 pr_err("failed to read event header\n");
1744 if (session
->header
.needs_swap
)
1745 perf_event_header__bswap(&event
->header
);
1747 size
= event
->header
.size
;
1748 if (size
< sizeof(struct perf_event_header
)) {
1749 pr_err("bad event header size\n");
1753 if (size
> cur_size
) {
1754 void *new = realloc(buf
, size
);
1756 pr_err("failed to allocate memory to read event\n");
1764 p
+= sizeof(struct perf_event_header
);
1766 if (size
- sizeof(struct perf_event_header
)) {
1767 err
= readn(fd
, p
, size
- sizeof(struct perf_event_header
));
1770 pr_err("unexpected end of event stream\n");
1774 pr_err("failed to read event data\n");
1779 if ((skip
= perf_session__process_event(session
, event
, head
)) < 0) {
1780 pr_err("%#" PRIx64
" [%#x]: failed to process type: %d\n",
1781 head
, event
->header
.size
, event
->header
.type
);
1791 if (!session_done())
1794 /* do the final flush for ordered samples */
1795 err
= ordered_events__flush(oe
, OE_FLUSH__FINAL
);
1798 err
= auxtrace__flush_events(session
, tool
);
1801 err
= perf_session__flush_thread_stacks(session
);
1805 perf_session__warn_about_errors(session
);
1806 ordered_events__free(&session
->ordered_events
);
1807 auxtrace__free_events(session
);
1811 static union perf_event
*
1812 fetch_mmaped_event(struct perf_session
*session
,
1813 u64 head
, size_t mmap_size
, char *buf
)
1815 union perf_event
*event
;
1818 * Ensure we have enough space remaining to read
1819 * the size of the event in the headers.
1821 if (head
+ sizeof(event
->header
) > mmap_size
)
1824 event
= (union perf_event
*)(buf
+ head
);
1826 if (session
->header
.needs_swap
)
1827 perf_event_header__bswap(&event
->header
);
1829 if (head
+ event
->header
.size
> mmap_size
) {
1830 /* We're not fetching the event so swap back again */
1831 if (session
->header
.needs_swap
)
1832 perf_event_header__bswap(&event
->header
);
/*
 * On 64-bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32-bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
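
/*
 * Editor's note (illustrative, not in the original source): on 32-bit the
 * reader below walks the file through a ring of NUM_MMAPS windows of
 * MMAP_SIZE bytes each. When fetch_mmaped_event() reports that the next
 * event does not fit in the current window, the slot about to be reused is
 * munmap'ed and the file is remapped at a page-aligned offset just below
 * the current head, so an event is never split across two live mappings.
 */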
1853 typedef s64 (*reader_cb_t
)(struct perf_session
*session
,
1854 union perf_event
*event
,
1861 reader_cb_t process
;
1865 reader__process_events(struct reader
*rd
, struct perf_session
*session
,
1866 struct ui_progress
*prog
)
1868 u64 data_size
= rd
->data_size
;
1869 u64 head
, page_offset
, file_offset
, file_pos
, size
;
1870 int err
= 0, mmap_prot
, mmap_flags
, map_idx
= 0;
1872 char *buf
, *mmaps
[NUM_MMAPS
];
1873 union perf_event
*event
;
1876 page_offset
= page_size
* (rd
->data_offset
/ page_size
);
1877 file_offset
= page_offset
;
1878 head
= rd
->data_offset
- page_offset
;
1880 ui_progress__init_size(prog
, data_size
, "Processing events...");
1882 data_size
+= rd
->data_offset
;
1884 mmap_size
= MMAP_SIZE
;
1885 if (mmap_size
> data_size
) {
1886 mmap_size
= data_size
;
1887 session
->one_mmap
= true;
1890 memset(mmaps
, 0, sizeof(mmaps
));
1892 mmap_prot
= PROT_READ
;
1893 mmap_flags
= MAP_SHARED
;
1895 if (session
->header
.needs_swap
) {
1896 mmap_prot
|= PROT_WRITE
;
1897 mmap_flags
= MAP_PRIVATE
;
1900 buf
= mmap(NULL
, mmap_size
, mmap_prot
, mmap_flags
, rd
->fd
,
1902 if (buf
== MAP_FAILED
) {
1903 pr_err("failed to mmap file\n");
1907 mmaps
[map_idx
] = buf
;
1908 map_idx
= (map_idx
+ 1) & (ARRAY_SIZE(mmaps
) - 1);
1909 file_pos
= file_offset
+ head
;
1910 if (session
->one_mmap
) {
1911 session
->one_mmap_addr
= buf
;
1912 session
->one_mmap_offset
= file_offset
;
1916 event
= fetch_mmaped_event(session
, head
, mmap_size
, buf
);
1918 if (mmaps
[map_idx
]) {
1919 munmap(mmaps
[map_idx
], mmap_size
);
1920 mmaps
[map_idx
] = NULL
;
1923 page_offset
= page_size
* (head
/ page_size
);
1924 file_offset
+= page_offset
;
1925 head
-= page_offset
;
1929 size
= event
->header
.size
;
1933 if (size
< sizeof(struct perf_event_header
) ||
1934 (skip
= rd
->process(session
, event
, file_pos
)) < 0) {
1935 pr_err("%#" PRIx64
" [%#x]: failed to process type: %d [%s]\n",
1936 file_offset
+ head
, event
->header
.size
,
1937 event
->header
.type
, strerror(-skip
));
1948 ui_progress__update(prog
, size
);
1953 if (file_pos
< data_size
)
1960 static s64
process_simple(struct perf_session
*session
,
1961 union perf_event
*event
,
1964 return perf_session__process_event(session
, event
, file_offset
);
1967 static int __perf_session__process_events(struct perf_session
*session
)
1969 struct reader rd
= {
1970 .fd
= perf_data__fd(session
->data
),
1971 .data_size
= session
->header
.data_size
,
1972 .data_offset
= session
->header
.data_offset
,
1973 .process
= process_simple
,
1975 struct ordered_events
*oe
= &session
->ordered_events
;
1976 struct perf_tool
*tool
= session
->tool
;
1977 struct ui_progress prog
;
1980 perf_tool__fill_defaults(tool
);
1982 if (rd
.data_size
== 0)
1985 ui_progress__init_size(&prog
, rd
.data_size
, "Processing events...");
1987 err
= reader__process_events(&rd
, session
, &prog
);
1990 /* do the final flush for ordered samples */
1991 err
= ordered_events__flush(oe
, OE_FLUSH__FINAL
);
1994 err
= auxtrace__flush_events(session
, tool
);
1997 err
= perf_session__flush_thread_stacks(session
);
1999 ui_progress__finish();
2001 perf_session__warn_about_errors(session
);
	/*
	 * We may be switching perf.data output, make ordered_events
	 * reusable.
	 */
2006 ordered_events__reinit(&session
->ordered_events
);
2007 auxtrace__free_events(session
);
2008 session
->one_mmap
= false;
int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}
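
/*
 * Illustrative usage sketch (editor's note, not part of the original file
 * and kept compiled out): a typical report-style consumer drives this entry
 * point roughly as follows. "my_tool" is a hypothetical struct perf_tool;
 * the perf_data initializer is schematic and its field layout may differ by
 * version. perf_session__new(), perf_session__process_events() and
 * perf_session__delete() are the functions defined in this file.
 */
#if 0
	struct perf_data data = {
		.path = "perf.data",		/* field name may differ by version */
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session = perf_session__new(&data, false, &my_tool);

	if (session != NULL) {
		perf_session__process_events(session);	/* fills tool defaults itself */
		perf_session__delete(session);
	}
#endif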
2023 bool perf_session__has_traces(struct perf_session
*session
, const char *msg
)
2025 struct perf_evsel
*evsel
;
2027 evlist__for_each_entry(session
->evlist
, evsel
) {
2028 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
)
2032 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg
);
2036 int map__set_kallsyms_ref_reloc_sym(struct map
*map
, const char *symbol_name
, u64 addr
)
2039 struct ref_reloc_sym
*ref
;
2042 ref
= zalloc(sizeof(struct ref_reloc_sym
));
2046 ref
->name
= strdup(symbol_name
);
2047 if (ref
->name
== NULL
) {
2052 bracket
= strchr(ref
->name
, ']');
2058 kmap
= map__kmap(map
);
2060 kmap
->ref_reloc_sym
= ref
;
2065 size_t perf_session__fprintf_dsos(struct perf_session
*session
, FILE *fp
)
2067 return machines__fprintf_dsos(&session
->machines
, fp
);
2070 size_t perf_session__fprintf_dsos_buildid(struct perf_session
*session
, FILE *fp
,
2071 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
2073 return machines__fprintf_dsos_buildid(&session
->machines
, fp
, skip
, parm
);
2076 size_t perf_session__fprintf_nr_events(struct perf_session
*session
, FILE *fp
)
2079 const char *msg
= "";
2081 if (perf_header__has_feat(&session
->header
, HEADER_AUXTRACE
))
2082 msg
= " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2084 ret
= fprintf(fp
, "\nAggregated stats:%s\n", msg
);
2086 ret
+= events_stats__fprintf(&session
->evlist
->stats
, fp
);
2090 size_t perf_session__fprintf(struct perf_session
*session
, FILE *fp
)
2093 * FIXME: Here we have to actually print all the machines in this
2094 * session, not just the host...
2096 return machine__fprintf(&session
->machines
.host
, fp
);
2099 struct perf_evsel
*perf_session__find_first_evtype(struct perf_session
*session
,
2102 struct perf_evsel
*pos
;
2104 evlist__for_each_entry(session
->evlist
, pos
) {
2105 if (pos
->attr
.type
== type
)
2111 int perf_session__cpu_bitmap(struct perf_session
*session
,
2112 const char *cpu_list
, unsigned long *cpu_bitmap
)
2115 struct cpu_map
*map
;
2117 for (i
= 0; i
< PERF_TYPE_MAX
; ++i
) {
2118 struct perf_evsel
*evsel
;
2120 evsel
= perf_session__find_first_evtype(session
, i
);
2124 if (!(evsel
->attr
.sample_type
& PERF_SAMPLE_CPU
)) {
2125 pr_err("File does not contain CPU events. "
2126 "Remove -C option to proceed.\n");
2131 map
= cpu_map__new(cpu_list
);
2133 pr_err("Invalid cpu_list\n");
2137 for (i
= 0; i
< map
->nr
; i
++) {
2138 int cpu
= map
->map
[i
];
2140 if (cpu
>= MAX_NR_CPUS
) {
2141 pr_err("Requested CPU %d too large. "
2142 "Consider raising MAX_NR_CPUS\n", cpu
);
2143 goto out_delete_map
;
2146 set_bit(cpu
, cpu_bitmap
);
2156 void perf_session__fprintf_info(struct perf_session
*session
, FILE *fp
,
2159 if (session
== NULL
|| fp
== NULL
)
2162 fprintf(fp
, "# ========\n");
2163 perf_header__fprintf_info(session
, fp
, full
);
2164 fprintf(fp
, "# ========\n#\n");
2168 int __perf_session__set_tracepoints_handlers(struct perf_session
*session
,
2169 const struct perf_evsel_str_handler
*assocs
,
2172 struct perf_evsel
*evsel
;
2176 for (i
= 0; i
< nr_assocs
; i
++) {
2178 * Adding a handler for an event not in the session,
2181 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
, assocs
[i
].name
);
2186 if (evsel
->handler
!= NULL
)
2188 evsel
->handler
= assocs
[i
].handler
;
2196 int perf_event__process_id_index(struct perf_session
*session
,
2197 union perf_event
*event
)
2199 struct perf_evlist
*evlist
= session
->evlist
;
2200 struct id_index_event
*ie
= &event
->id_index
;
2201 size_t i
, nr
, max_nr
;
2203 max_nr
= (ie
->header
.size
- sizeof(struct id_index_event
)) /
2204 sizeof(struct id_index_entry
);
2210 fprintf(stdout
, " nr: %zu\n", nr
);
2212 for (i
= 0; i
< nr
; i
++) {
2213 struct id_index_entry
*e
= &ie
->entries
[i
];
2214 struct perf_sample_id
*sid
;
2217 fprintf(stdout
, " ... id: %"PRIu64
, e
->id
);
2218 fprintf(stdout
, " idx: %"PRIu64
, e
->idx
);
2219 fprintf(stdout
, " cpu: %"PRId64
, e
->cpu
);
2220 fprintf(stdout
, " tid: %"PRId64
"\n", e
->tid
);
2223 sid
= perf_evlist__id2sid(evlist
, e
->id
);
2233 int perf_event__synthesize_id_index(struct perf_tool
*tool
,
2234 perf_event__handler_t process
,
2235 struct perf_evlist
*evlist
,
2236 struct machine
*machine
)
2238 union perf_event
*ev
;
2239 struct perf_evsel
*evsel
;
2240 size_t nr
= 0, i
= 0, sz
, max_nr
, n
;
2243 pr_debug2("Synthesizing id index\n");
2245 max_nr
= (UINT16_MAX
- sizeof(struct id_index_event
)) /
2246 sizeof(struct id_index_entry
);
2248 evlist__for_each_entry(evlist
, evsel
)
2251 n
= nr
> max_nr
? max_nr
: nr
;
2252 sz
= sizeof(struct id_index_event
) + n
* sizeof(struct id_index_entry
);
2257 ev
->id_index
.header
.type
= PERF_RECORD_ID_INDEX
;
2258 ev
->id_index
.header
.size
= sz
;
2259 ev
->id_index
.nr
= n
;
2261 evlist__for_each_entry(evlist
, evsel
) {
2264 for (j
= 0; j
< evsel
->ids
; j
++) {
2265 struct id_index_entry
*e
;
2266 struct perf_sample_id
*sid
;
2269 err
= process(tool
, ev
, NULL
, machine
);
2276 e
= &ev
->id_index
.entries
[i
++];
2278 e
->id
= evsel
->id
[j
];
2280 sid
= perf_evlist__id2sid(evlist
, e
->id
);
2292 sz
= sizeof(struct id_index_event
) + nr
* sizeof(struct id_index_entry
);
2293 ev
->id_index
.header
.size
= sz
;
2294 ev
->id_index
.nr
= nr
;
2296 err
= process(tool
, ev
, NULL
, machine
);