#define _FILE_OFFSET_BITS 64

#include <linux/list.h>
#include <linux/kernel.h>

#include "trace-event.h"
static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;
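/*
 * events/event_count form the in-memory table of trace event types that
 * perf_header__push_event() grows and perf_header__find_event() searches;
 * the table is serialized into the perf.data header by
 * perf_session__write_header() and read back by perf_session__read_header().
 */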
int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}
char *perf_header__find_event(u64 id)
{
	int i;

	for (i = 0; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}
static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC (*(u64 *)__perf_magic)
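/*
 * PERF_MAGIC reinterprets the 8-byte "PERFFILE" string as a u64, so its
 * numeric value depends on host byte order; presumably for that reason the
 * readers below compare the on-disk magic with memcmp() against
 * __perf_magic instead of comparing u64 values.
 */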
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
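/*
 * Features are tracked as individual bits in header->adds_features; every
 * bit that is set when the header is written gets one struct
 * perf_file_section (offset/size pair) appended after the data area by
 * perf_header__adds_write() below.
 */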
static int do_write(int fd, const void *buf, size_t size)
{
	/* Keep writing until the whole buffer has gone out. */
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf  += ret;
	}

	return 0;
}
static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}
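/*
 * write_padded() emits 'count' payload bytes followed by enough zero bytes
 * to reach 'count_aligned', so that variable-length strings such as the
 * DSO names in the build-id table always end on a NAME_ALIGN boundary.
 */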
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\
			continue;			\
		else
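/*
 * The trailing "continue; else" lets the macro be used like an ordinary
 * loop header: the statement (or block) following the macro invocation
 * only runs for DSOs that actually carry a build-id.
 */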
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
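/*
 * On-disk layout of one build-id table entry as written above: a struct
 * build_id_event (perf event header, build_id bytes and pid) immediately
 * followed by the NUL-terminated, zero-padded long_name. b.header.size
 * covers both parts, which is how the readers further down walk the table.
 */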
static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = zalloc(size),
	     *linkname = zalloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms) {
		if (symbol_conf.kptr_restrict) {
			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
			return 0;
		}
		realname = (char *)name;
	} else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}
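/*
 * Resulting cache layout (sketch; debugdir is the build-id cache root,
 * e.g. ~/.debug, and sbuild_id the hex build-id string):
 *
 *   <debugdir>/<object path>/<sbuild_id>
 *       hardlink/copy of the object (or of /proc/kallsyms)
 *   <debugdir>/.build-id/<first two hex chars>/<remaining hex chars>
 *       relative symlink (via the "../.." rewrite above) back to the
 *       entry above
 */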
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	if (readlink(linkname, filename, size) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);

	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}
static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);

	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec;
	int sec_size;
	u64 sec_start;
	int idx = 0, err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
		struct perf_file_section *trace_sec;

		trace_sec = &feat_sec[idx++];

		/* Write trace info */
		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
		read_tracing_data(fd, &evlist->entries);
		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
	}

	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
		struct perf_file_section *buildid_sec;

		buildid_sec = &feat_sec[idx++];

		/* Write build-ids */
		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
		err = dsos__write_buildid_table(header, fd);
		if (err < 0) {
			pr_debug("failed to write buildid table\n");
			goto out_free;
		}
		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
				    buildid_sec->offset;
		if (!no_buildid_cache)
			perf_session__cache_build_ids(session);
	}

	lseek(fd, sec_start, SEEK_SET);
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}
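/*
 * Feature sections are written in two passes: room for the array of
 * struct perf_file_section is reserved at sec_start (right after the data
 * area), each enabled feature's payload is written behind that reservation
 * while its offset/size is recorded in feat_sec[], and finally the file
 * position is rewound to sec_start so the completed section table can be
 * written in place.
 */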
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0) {
				pr_debug("failed to write perf header\n");
				return err;
			}
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
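/*
 * Rough on-disk layout produced above: struct perf_file_header at offset
 * 0, then the per-event id arrays, the table of struct perf_file_attr
 * entries, the perf_trace_event_type table, the sample data, and (when
 * at_exit is set) the feature sections written by perf_header__adds_write().
 */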
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}
int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
					      adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));
	/*
	 * FIXME: hack that assumes that if we need swap the perf.data file
	 * may be coming from an arch with a different word-size, ergo different
	 * DEFINE_BITMAP format, investigate more later, but for now it's mostly
	 * safe to assume that we have a build-id section. Trace files probably
	 * have several other issues in this realm anyway...
	 */
	if (ph->needs_swap) {
		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
		perf_header__set_feat(ph, HEADER_BUILD_ID);
	}

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}
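/*
 * Endianness detection: attr_size is stored in the writer's byte order, so
 * when it does not match sizeof(struct perf_file_attr) it is byte-swapped
 * and re-checked; a match after swapping means the file came from a host
 * of the opposite endianness and needs_swap is set for all later reads.
 */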
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill it in
		 * properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;
	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}
static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);
	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}
int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}
int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}
int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}
int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}
int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	ssize_t size = 0, aligned_size = 0, padding;
	int err;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = read_tracing_data_size(fd, &evlist->entries);
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	err = read_tracing_data(fd, &evlist->entries);
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}
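/*
 * Pipe-mode protocol: the PERF_RECORD_HEADER_TRACING_DATA event announces
 * that 'aligned_size' bytes of tracing metadata follow directly in the
 * stream; read_tracing_data() then emits that payload and write_padded()
 * appends the alignment padding. perf_event__process_tracing_data() below
 * consumes it again via trace_report() plus a read of the same padding.
 */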
int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}
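/*
 * In pipe mode, build-ids are streamed as PERF_RECORD_HEADER_BUILD_ID
 * events like the one built above instead of being collected in the
 * HEADER_BUILD_ID feature section; perf_event__process_build_id() hands
 * them to the same __event_process_build_id() helper used by the file
 * reader.
 */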
int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}