#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include "perf_regs.h"

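/*
 * Sanity-check a data file on open: the header must parse, and for
 * non-pipe files the sample_type, sample_id_all and read_format
 * settings must be consistent across all events in the file.
 */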
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

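/*
 * A session ties together a data file (if any), its event list and the
 * machines its events refer to. Read sessions validate the file up
 * front; write and live sessions create the kernel maps themselves.
 */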
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	ordered_events__init(&session->ordered_events);
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *perf_session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

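/*
 * Fill every callback the tool left NULL with a default handler, so
 * that event delivery never has to test individual function pointers.
 */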
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_id_index_stub;
}

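/*
 * Byte-swapping helpers for perf.data files recorded on a machine of
 * the opposite endianness. Each helper swaps one record type in place;
 * swap_sample_id_all() handles the trailing sample_id_all block that
 * most record types carry.
 */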
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid   = bswap_32(event->mmap.pid);
	event->mmap.tid   = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len   = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid  = bswap_32(event->fork.pid);
	event->fork.tid  = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid          = bswap_32(event->read.pid);
	event->read.tid          = bswap_32(event->read.tid);
	event->read.value        = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id           = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time      = bswap_64(event->throttle.time);
	event->throttle.id        = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type               = bswap_32(attr->type);
	attr->size               = bswap_32(attr->size);
	attr->config             = bswap_64(attr->config);
	attr->sample_period      = bswap_64(attr->sample_period);
	attr->sample_type        = bswap_64(attr->sample_type);
	attr->read_format        = bswap_64(attr->read_format);
	attr->wakeup_events      = bswap_32(attr->wakeup_events);
	attr->bp_type            = bswap_32(attr->bp_type);
	attr->bp_addr            = bswap_64(attr->bp_addr);
	attr->bp_len             = bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user   = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]                = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]                = perf_event__comm_swap,
	[PERF_RECORD_FORK]                = perf_event__task_swap,
	[PERF_RECORD_EXIT]                = perf_event__task_swap,
	[PERF_RECORD_LOST]                = perf_event__all64_swap,
	[PERF_RECORD_READ]                = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]     = NULL,
	[PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
}

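/*
 * Queue an event for timestamp-ordered delivery. Events without a
 * valid timestamp cannot be ordered and are rejected with -ETIME; if
 * allocation fails, half the queue is flushed and the allocation is
 * retried once.
 */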
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_tool *tool, struct perf_sample *sample,
			     u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		s->stats.nr_unordered_events++;
	}

	new = ordered_events__new(oe, timestamp, event);
	if (!new) {
		ordered_events__flush(s, tool, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp, event);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

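/*
 * Pick the machine a sample belongs to: guest kernel/user samples are
 * attributed to the machine keyed by the guest pid (falling back to
 * the default guest machine), everything else to the host.
 */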
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}

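/*
 * PERF_SAMPLE_READ delivery: each read value is mapped back to its
 * evsel through the sample id, and the period is computed as the delta
 * against the last value seen for that id.
 */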
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

int perf_session__deliver_event(struct perf_session *session,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool,
					    u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool)
{
	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, 0);

	return perf_session__deliver_event(session, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

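/*
 * Read a single event at an arbitrary file offset without disturbing
 * the normal processing stream, using the one_mmap mapping directly
 * when possible and falling back to the caller-supplied buffer.
 */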
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz)
		return -1;

	/* read the payload into the space just past the header */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session_queue_event(session, event, tool, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, &sample, tool,
					   file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}

	if (session->stats.nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
}

volatile int session_done;

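/*
 * Pipe mode: records arrive on a stream that cannot be seeked, so each
 * event is read into a heap buffer (grown on demand) and processed
 * immediately.
 */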
static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);

		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	ordered_events__free(&session->ordered_events);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

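/*
 * File mode: mmap the data section in page-aligned windows and walk
 * the records in place, remapping whenever an event would cross the
 * end of the current window.
 */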
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, tool, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	ordered_events__free(&session->ordered_events);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}

	return NULL;
}

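/*
 * Print the resolved location of a sample: the full callchain when one
 * is available and requested, otherwise just the sampled IP, with
 * symbol/dso/offset/source-line annotations selected by print_opts.
 */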
void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (thread__resolve_callchain(al->thread, evsel,
					      sample, NULL, NULL,
					      PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}

*session
, FILE *fp
,
1614 if (session
== NULL
|| fp
== NULL
)
1617 fd
= perf_data_file__fd(session
->file
);
1619 ret
= fstat(fd
, &st
);
1623 fprintf(fp
, "# ========\n");
1624 fprintf(fp
, "# captured on: %s", ctime(&st
.st_ctime
));
1625 perf_header__fprintf_info(session
, fp
, full
);
1626 fprintf(fp
, "# ========\n#\n");
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

*tool __maybe_unused
,
1659 union perf_event
*event
,
1660 struct perf_session
*session
)
1662 struct perf_evlist
*evlist
= session
->evlist
;
1663 struct id_index_event
*ie
= &event
->id_index
;
1664 size_t i
, nr
, max_nr
;
1666 max_nr
= (ie
->header
.size
- sizeof(struct id_index_event
)) /
1667 sizeof(struct id_index_entry
);
1673 fprintf(stdout
, " nr: %zu\n", nr
);
1675 for (i
= 0; i
< nr
; i
++) {
1676 struct id_index_entry
*e
= &ie
->entries
[i
];
1677 struct perf_sample_id
*sid
;
1680 fprintf(stdout
, " ... id: %"PRIu64
, e
->id
);
1681 fprintf(stdout
, " idx: %"PRIu64
, e
->idx
);
1682 fprintf(stdout
, " cpu: %"PRId64
, e
->cpu
);
1683 fprintf(stdout
, " tid: %"PRId64
"\n", e
->tid
);
1686 sid
= perf_evlist__id2sid(evlist
, e
->id
);
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}