#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
static const char *event__name[] = {
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};
const char *event__get_event_name(unsigned int id)
{
	if (id >= ARRAY_SIZE(event__name))
		return "INVALID";
	if (!event__name[id])
		return "UNKNOWN";
	return event__name[id];
}
/* Dummy sample passed to ->process for all synthesized events. */
static struct sample_data synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};
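/*
 * Synthesize a PERF_RECORD_COMM event for @pid from /proc/<pid>/status:
 * "Name:" provides the comm and "Tgid:" the thread group leader. When
 * @full is set, /proc/<pid>/task is walked as well so that one event is
 * emitted per thread. Returns the tgid, or 0 if the task raced away.
 */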
static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n",
				   filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, session->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   session->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(event, &synth_sample, session);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;

		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(event, &synth_sample, session);
	}
	closedir(tasks);

out:
	fclose(fp);

	return tgid;
}
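/*
 * Synthesize PERF_RECORD_MMAP events for @pid by parsing /proc/<pid>/maps.
 * Only executable mappings backed by a file (or [vdso]) are emitted,
 * since those are the only ones samples can resolve symbols against.
 */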
static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
						   (sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, session->id_hdr_size);
			event->mmap.header.size += session->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(event, &synth_sample, session);
		}
	}

	fclose(fp);
	return 0;
}
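/*
 * Synthesize one PERF_RECORD_MMAP per kernel module by walking the
 * MAP__FUNCTION maps of @machine. The kernel map itself is skipped
 * here; event__synthesize_kernel_mmap() takes care of it.
 */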
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);

	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, session->id_hdr_size);
		event->mmap.header.size += session->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(event, &synth_sample, session);
	}

	free(event);
	return 0;
}
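/*
 * Helper for the thread synthesis entry points below: synthesize the
 * COMM event(s) for @pid first (which also yields its tgid), then its
 * MMAP events.
 */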
static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
				      pid_t pid, event__handler_t process,
				      struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
					    session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(mmap_event, pid, tgid,
					     process, session);
}
int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	event_t *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = __event__synthesize_thread(comm_event, mmap_event, pid,
					 process, session);

	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
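/*
 * Synthesize COMM and MMAP events for every task currently listed in
 * /proc, reusing one pre-sized pair of event buffers for all of them.
 */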
int event__synthesize_threads(event__handler_t process,
			      struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;
	event_t *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid,
					   process, session);
	}

	closedir(proc);
	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
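/*
 * Callback state and callback for kallsyms__parse(): record the start
 * address of the requested reference symbol (typically "_text") so the
 * kernel MMAP event can carry it in ->pgoff.
 */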
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}
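/*
 * Synthesize the PERF_RECORD_MMAP for the kernel image itself: the start
 * address of @symbol_name is looked up via kallsyms and stored in ->pgoff,
 * while the vmlinux map provides the extent. Works for both the host and
 * guest machines.
 */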
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);

	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
		free(event);
		return -ENOENT;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(event, &synth_sample, session);
	free(event);

	return err;
}
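/*
 * Keep the COMM/THREAD column widths in sync with the longest comm seen,
 * honouring the user's column-width, field-separator and comm-list
 * settings.
 */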
static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		u16 slen = strlen(comm);

		if (hists__new_col_len(hists, HISTC_COMM, slen))
			hists__set_col_len(hists, HISTC_THREAD, slen + 6);
	}
}
static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}
int event__process_comm(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.tid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
						      &session->hists)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}
int event__process_lost(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}
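/*
 * A kernel-space MMAP either describes a module (filename starting with
 * '/', or a bracketed short name) or the kernel text itself (filename
 * starting with the per-machine kernel mmap prefix); create the
 * corresponding maps and DSOs on the right machine.
 */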
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;
			name++;

			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;

			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
int event__process_mmap(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
int event__process_task(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	if (self->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}
int event__process(event_t *event, struct sample_data *sample,
		   struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, sample, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, sample, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, sample, session);
		break;
	default:
		break;
	}

	return 0;
}
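/*
 * Resolve @addr to a map: pick the host or guest machine and the kernel
 * or user map group from @cpumode, then search it, falling back to the
 * kernel maps for negative addresses (vsyscall/vdso executed from
 * user mode).
 */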
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}
void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}
static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = dso__name_len(self);
		hists__new_col_len(hists, HISTC_DSO, slen);
	}

	self->slen_calculated = 1;
}
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, struct sample_data *data,
			     symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = data->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso, &session->hists);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(&session->hists, HISTC_DSO,
					   unresolved_col_width);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}
static int event__parse_id_sample(const event_t *event,
				  struct perf_session *session,
				  struct sample_data *sample)
{
	const u64 *array;
	u64 type;

	sample->cpu = sample->pid = sample->tid = -1;
	sample->stream_id = sample->id = sample->time = -1ULL;

	if (!session->sample_id_all)
		return 0;

	array = event->sample.array;
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;
	type = session->sample_type;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
int event__parse_sample(const event_t *event, struct perf_session *session,
			struct sample_data *data)
{
	const u64 *array;
	u64 type;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return event__parse_id_sample(event, session, data);

	array = event->sample.array;
	type = session->sample_type;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}