#define _FILE_OFFSET_BITS 64
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/utsname.h>
#include "trace-event.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

static u32 header_argc;
static const char **header_argv;

int perf_header__push_event(u64 id, const char *name)

	struct perf_trace_event_type *nevents;

	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	nevents = realloc(events, (event_count + 1) * sizeof(*events));

	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);

char *perf_header__find_event(u64 id)

	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
/*
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2
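/*
 * For reference (not part of the original comments): read as little-endian
 * bytes, 0x32454c4946524550 is the ASCII string "PERFILE2", and
 * __perf_magic2_sw is the same value byte-swapped. A reader that finds
 * __perf_magic2_sw in the file header knows the file was produced on a host
 * of the opposite endianness; check_magic_endian() below relies on exactly
 * this comparison to set ph->needs_swap.
 */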
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;

void perf_header__set_feat(struct perf_header *header, int feat)

	set_bit(feat, header->adds_features);

void perf_header__clear_feat(struct perf_header *header, int feat)

	clear_bit(feat, header->adds_features);

bool perf_header__has_feat(const struct perf_header *header, int feat)

	return test_bit(feat, header->adds_features);

static int do_write(int fd, const void *buf, size_t size)

	int ret = write(fd, buf, size);
#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)

	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	err = do_write(fd, zero_buf, count_aligned - count);

static int do_write_string(int fd, const char *str)

	olen = strlen(str) + 1;
	len = ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));

	return write_padded(fd, str, olen, len);
static char *do_read_string(int fd, struct perf_header *ph)

	sz = read(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))

	ret = read(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
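/*
 * For reference, a sketch of the string format shared by do_write_string()
 * and do_read_string(), derived from the calls above (illustrative only):
 *
 *	u32  len;	ALIGN(strlen(str) + 1, NAME_ALIGN)
 *	char buf[len];	NUL-terminated string, zero-padded up to len
 *
 * so strlen(buf) may be smaller than len - 1.
 */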
perf_header__set_cmdline(int argc, const char **argv)

	header_argc = (u32)argc;

	/* do not include NULL termination */
	header_argv = calloc(argc, sizeof(char *));

	/*
	 * must copy argv contents because it gets moved
	 * around during option parsing
	 */
	for (i = 0; i < argc; i++)
		header_argv[i] = argv[i];

#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,

	dsos__for_each_with_build_id(pos, head) {
		struct build_id_event b;

		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));

		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
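/*
 * Sketch of one build-id table entry as written above (illustrative, not
 * verbatim): a struct build_id_event -- perf_event_header with .misc set to
 * the kernel/user/guest variant and .size = sizeof(b) + len, plus the pid and
 * the raw build-id bytes -- immediately followed by the DSO's long name,
 * NUL-terminated and zero-padded to a NAME_ALIGN (64 byte) boundary.
 */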
static int machine__write_buildid_table(struct machine *machine, int fd)

	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,

	err = __dsos__write_buildid_table(&machine->user_dsos,
					  machine->pid, umisc, fd);

static int dsos__write_buildid_table(struct perf_header *header, int fd)

	struct perf_session *session = container_of(header,
			struct perf_session, header);

	int err = machine__write_buildid_table(&session->host_machine, fd);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)

	const size_t size = PATH_MAX;
	char *realname, *filename = zalloc(size),
	     *linkname = zalloc(size), *targetname;

	if (symbol_conf.kptr_restrict) {
		pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");

		realname = (char *)name;

		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)

	len = scnprintf(filename, size, "%s%s%s",
			debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (copyfile("/proc/kallsyms", filename))
	} else if (link(realname, filename) && copyfile(name, filename))

	len = scnprintf(linkname, size, "%s/.build-id/%.2s",
			debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
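/*
 * Rough sketch of the cache layout this ends up creating under debugdir
 * (paths illustrative, derived from the snprintf/scnprintf calls above):
 *
 *	<debugdir>/<dso path>/<sbuild_id>	copy or hard link of the DSO
 *	<debugdir>/.build-id/<xx>/<rest>	relative symlink "../..<dso path>/<sbuild_id>"
 *
 * where <xx> is the first two hex characters of the build-id and <rest> the
 * remaining ones.
 */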
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,

	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)

	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);

	if (filename == NULL || linkname == NULL)

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))

	if (readlink(linkname, filename, size - 1) < 0)

	if (unlink(linkname))

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))

static int dso__cache_build_id(struct dso *dso, const char *debugdir)

	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)

	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);

static int perf_session__cache_build_ids(struct perf_session *session)

	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);

static bool machine__read_build_ids(struct machine *machine, bool with_hits)

	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)

	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);

static int write_tracing_data(int fd, struct perf_header *h __used,
			      struct perf_evlist *evlist)

	return read_tracing_data(fd, &evlist->entries);

static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __used)

	struct perf_session *session;

	session = container_of(h, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))

	err = dsos__write_buildid_table(h, fd);

		pr_debug("failed to write buildid table\n");

	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);
static int write_hostname(int fd, struct perf_header *h __used,
			  struct perf_evlist *evlist __used)

	return do_write_string(fd, uts.nodename);

static int write_osrelease(int fd, struct perf_header *h __used,
			   struct perf_evlist *evlist __used)

	return do_write_string(fd, uts.release);

static int write_arch(int fd, struct perf_header *h __used,
		      struct perf_evlist *evlist __used)

	return do_write_string(fd, uts.machine);

static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)

	return do_write_string(fd, perf_version_string);

static int write_cpudesc(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)

#define CPUINFO_PROC NULL

	const char *search = CPUINFO_PROC;

	file = fopen("/proc/cpuinfo", "r");

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))

	/* squash extra space characters (branding string) */

	while (*q && isspace(*q))

	while ((*r++ = *q++));

	ret = do_write_string(fd, s);
static int write_nrcpus(int fd, struct perf_header *h __used,
			struct perf_evlist *evlist __used)

	nr = sysconf(_SC_NPROCESSORS_CONF);

	nrc = (u32)(nr & UINT_MAX);

	nr = sysconf(_SC_NPROCESSORS_ONLN);

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));

	return do_write(fd, &nra, sizeof(nra));

static int write_event_desc(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)

	struct perf_evsel *attr;
	u32 nre = 0, nri, sz;

	list_for_each_entry(attr, &evlist->entries, node)

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(attr->attr);
	ret = do_write(fd, &sz, sizeof(sz));

	list_for_each_entry(attr, &evlist->entries, node) {

		ret = do_write(fd, &attr->attr, sz);

		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 */
		ret = do_write(fd, &nri, sizeof(nri));

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, event_name(attr));

		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
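/*
 * For reference, the EVENT_DESC section layout implied by the do_write calls
 * above (field order only, illustrative):
 *
 *	u32 nre;		number of events
 *	u32 sz;			sizeof(struct perf_event_attr)
 *	for each event:
 *		struct perf_event_attr attr;
 *		u32 nri;	number of unique ids for this event
 *		string name;	in the do_write_string() format
 *		u64 ids[nri];
 */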
static int write_cmdline(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)

	char buf[MAXPATHLEN];

	/*
	 * actual path to perf binary
	 */
	sprintf(proc, "/proc/%d/exe", getpid());
	ret = readlink(proc, buf, sizeof(buf));

	/* readlink() does not add null termination */

	/* account for binary path */

	ret = do_write(fd, &n, sizeof(n));

	ret = do_write_string(fd, buf);

	for (i = 0 ; i < header_argc; i++) {
		ret = do_write_string(fd, header_argv[i]);
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

	char **core_siblings;
	char **thread_siblings;

static int build_cpu_topo(struct cpu_topo *tp, int cpu)

	char filename[MAXPATHLEN];
	char *buf = NULL, *p;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");

	if (getline(&buf, &len, fp) <= 0)

	p = strchr(buf, '\n');

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))

	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;

	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");

	if (getline(&buf, &len, fp) <= 0)

	p = strchr(buf, '\n');

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))

	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;

static void free_cpu_topo(struct cpu_topo *tp)

	for (i = 0 ; i < tp->core_sib; i++)
		free(tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		free(tp->thread_siblings[i]);

static struct cpu_topo *build_cpu_topology(void)

	ncpus = sysconf(_SC_NPROCESSORS_CONF);

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	addr = calloc(1, sizeof(*tp) + 2 * sz);

	tp->core_siblings = addr;

	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
static int write_cpu_topology(int fd, struct perf_header *h __used,
			      struct perf_evlist *evlist __used)

	tp = build_cpu_topology();

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);

	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
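/*
 * For reference: the CPU_TOPOLOGY section written above is a count
 * (tp->core_sib) followed by that many core-sibling strings, then a count
 * (tp->thread_sib) followed by that many thread-sibling strings, each string
 * in the do_write_string() format.
 */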
static int write_total_mem(int fd, struct perf_header *h __used,
			   struct perf_evlist *evlist __used)

	fp = fopen("/proc/meminfo", "r");

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);

	n = sscanf(buf, "%*s %"PRIu64, &mem);

	ret = do_write(fd, &mem, sizeof(mem));

static int write_topo_node(int fd, int node)

	char str[MAXPATHLEN];
	char *buf = NULL, *p;
	u64 mem_total, mem_free, mem;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
		if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
		if (!strcmp(field, "MemTotal:"))
		if (!strcmp(field, "MemFree:"))

	ret = do_write(fd, &mem_total, sizeof(u64));

	ret = do_write(fd, &mem_free, sizeof(u64));

	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");

	if (getline(&buf, &len, fp) <= 0)

	p = strchr(buf, '\n');

	ret = do_write_string(fd, buf);

static int write_numa_topology(int fd, struct perf_header *h __used,
			       struct perf_evlist *evlist __used)

	struct cpu_map *node_map = NULL;

	fp = fopen("/sys/devices/system/node/online", "r");

	if (getline(&buf, &len, fp) <= 0)

	c = strchr(buf, '\n');

	node_map = cpu_map__new(buf);

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));

		ret = write_topo_node(fd, i);
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)

static int write_cpuid(int fd, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)

	ret = get_cpuid(buffer, sizeof(buffer));

	return do_write_string(fd, buffer);

static int write_branch_stack(int fd __used, struct perf_header *h __used,
			      struct perf_evlist *evlist __used)

static void print_hostname(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# hostname : %s\n", str);

static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# os release : %s\n", str);

static void print_arch(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# arch : %s\n", str);

static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpudesc : %s\n", str);

static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	fprintf(fp, "# nrcpus online : %u\n", nr);

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	fprintf(fp, "# nrcpus avail : %u\n", nr);

static void print_version(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# perf version : %s\n", str);

static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "%s ", str);
static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "# sibling cores   : %s\n", str);

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "# sibling threads : %s\n", str);

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)

	struct perf_event_attr attr;
	u32 nre, sz, nr, i, j;

	/* number of events */
	ret = read(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))

		nre = bswap_32(nre);

	ret = read(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))

	memset(&attr, 0, sizeof(attr));

	/* buffer to hold on file attr struct */

	for (i = 0 ; i < nre; i++) {
		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = read(fd, buf, sz);
		if (ret != (ssize_t)sz)

			perf_event__attr_swap(buf);

		memcpy(&attr, buf, msz);

		ret = read(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))

		str = do_read_string(fd, ph);
		fprintf(fp, "# event : name = %s, ", str);

		fprintf(fp, "type = %d, config = 0x%"PRIx64
			    ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,

		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
			attr.exclude_kernel);

		fprintf(fp, ", id = {");

		for (j = 0 ; j < nr; j++) {
			ret = read(fd, &id, sizeof(id));
			if (ret != (ssize_t)sizeof(id))

			fprintf(fp, " %"PRIu64, id);

	fprintf(fp, "# event desc: not available or unable to read\n");

static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)

	ret = read(fd, &mem, sizeof(mem));
	if (ret != sizeof(mem))

		mem = bswap_64(mem);

	fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);

	fprintf(fp, "# total memory : unknown\n");

static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)

	uint64_t mem_total, mem_free;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))

	for (i = 0; i < nr; i++) {

		ret = read(fd, &c, sizeof(c));
		if (ret != (ssize_t)sizeof(c))

		ret = read(fd, &mem_total, sizeof(u64));
		if (ret != sizeof(u64))

		ret = read(fd, &mem_free, sizeof(u64));
		if (ret != sizeof(u64))

		if (h->needs_swap) {
			mem_total = bswap_64(mem_total);
			mem_free = bswap_64(mem_free);

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",

		str = do_read_string(fd, h);
		fprintf(fp, "# node%u cpu list : %s\n", c, str);

	fprintf(fp, "# numa topology : not available\n");

static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)

	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpuid : %s\n", str);

static void print_branch_stack(struct perf_header *ph __used, int fd __used,

	fprintf(fp, "# contains samples with branch stack\n");
static int __event_process_build_id(struct build_id_event *bev,
				    struct perf_session *session)

	struct list_head *head;
	struct machine *machine;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;

	dso = __dsos__findnew(head, filename);

		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),

		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)

	struct perf_session *session = container_of(header, struct perf_session, header);

		struct perf_event_header header;
		u8	build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];

	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)

	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;

	while (offset < limit) {

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)

		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
static int process_tracing_data(struct perf_file_section *section __unused,
				struct perf_header *ph __unused,
				int feat __unused, int fd)

	trace_report(fd, false);

static int process_build_id(struct perf_file_section *section,
			    struct perf_header *ph,
			    int feat __unused, int fd)

	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
		pr_debug("Failed to read buildids, continuing...\n");

struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int feat, int fd);

#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPA(HEADER_HOSTNAME,	hostname),
	FEAT_OPA(HEADER_OSRELEASE,	osrelease),
	FEAT_OPA(HEADER_VERSION,	version),
	FEAT_OPA(HEADER_ARCH,		arch),
	FEAT_OPA(HEADER_NRCPUS,		nrcpus),
	FEAT_OPA(HEADER_CPUDESC,	cpudesc),
	FEAT_OPA(HEADER_CPUID,		cpuid),
	FEAT_OPA(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPA(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPA(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
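/*
 * Reading note (not from the original comments): FEAT_OPA registers only
 * write/print handlers, FEAT_OPP additionally wires a process_##func callback
 * used when reading a file back, and FEAT_OPF is presumably the variant whose
 * output is shown only in the full listing, since
 * perf_file_section__fprintf_info() below checks feat_ops[feat].full_only.
 */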
struct header_print_data {
	bool full; /* extended list of headers */

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					    struct perf_header *ph,
					    int feat, int fd, void *data)

	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);

	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);

	if (!feat_ops[feat].print)

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);

		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)

	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = session->fd;

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)

		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);

			pr_debug("failed to write feature %d\n", type);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)

	struct perf_file_section *feat_sec, *p;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

	feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);

		pr_debug("failed to write feature section\n");
int perf_header__write_pipe(int fd)

	struct perf_pipe_file_header f_header;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),

	err = do_write(fd, &f_header, sizeof(f_header));

		pr_debug("failed to write perf pipe header\n");

int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)

	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));

			pr_debug("failed to write perf header\n");

		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));

			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),

		err = do_write(fd, &f_attr, sizeof(f_attr));

			pr_debug("failed to write perf header attribute\n");

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);

		err = do_write(fd, events, header->event_size);

			pr_debug("failed to write perf header events\n");

	header->data_offset = lseek(fd, 0, SEEK_CUR);

		err = perf_header__adds_write(header, evlist, fd);

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
			.offset = header->data_offset,
			.size   = header->data_size,
			.offset = header->event_offset,
			.size   = header->event_size,

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));

		pr_debug("failed to write perf header\n");

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
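/*
 * Rough on-disk picture implied by perf_session__write_header() above
 * (offsets are whatever lseek() returns at each step; illustrative only):
 *
 *	[struct perf_file_header]	rewritten at offset 0 at the end
 *	[u64 ids ...]			per evsel, referenced by f_attr.ids
 *	[struct perf_file_attr ...]	header->attr_offset
 *	[perf_trace_event_type ...]	header->event_offset
 *	[sample data]			header->data_offset
 *	[feature sections + data]	after data, via perf_header__adds_write()
 */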
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)

	if (readn(fd, buf, size) <= 0)

	if (header->needs_swap)
		mem_bswap_64(buf, size);

int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))

	struct perf_file_section *feat_sec, *sec;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

	feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used instead. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations, for both endiannesses,
 * to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)

	uint64_t ref_size, attr_size;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)

			ph->needs_swap = true;

		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",

	/* could not determine endianness */
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,

/*
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between host recording the samples, and host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])

			ph->needs_swap = true;

		pr_debug("Pipe ABI%d perf.data file detected\n", i);
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));

		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);

	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */

	/* check magic number with one endianness */
	if (magic == __perf_magic2)

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)

	ph->needs_swap = true;
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);

	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {

			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size    = header->data.size;
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data __used)

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);

	if (!feat_ops[feat].process)

	return feat_ops[feat].process(section, ph, feat, fd);

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
					struct perf_header *ph, int fd,

	ret = readn(fd, header, sizeof(*header));

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");

		header->size = bswap_64(header->size);

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)

static int perf_header__read_pipe(struct perf_session *session, int fd)

	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)

	struct perf_event_attr *attr = &f_attr->attr;
	size_t our_sz = sizeof(f_attr->attr);

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);

		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);

	/* on file perf_event_attr size */

		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);

	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;

	ptr += PERF_ATTR_SIZE_VER0;

	ret = readn(fd, ptr, left);

	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
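/*
 * For reference: read_attr() above copes with older and newer files by first
 * reading the guaranteed PERF_ATTR_SIZE_VER0 bytes, then pulling in whatever
 * remainder the on-file attr size announces (rejecting files whose attrs are
 * larger than this tool's struct perf_event_attr), and finally reading the
 * perf_file_section that says where that event's ids live.
 */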
static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)

	struct event_format *event = trace_find_event(evsel->attr.config);

	snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
	evsel->name = strdup(bf);
	if (evsel->name == NULL)

static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)

	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__set_tracepoint_name(pos))
int perf_session__read_header(struct perf_session *session, int fd)

	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0)

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;

		if (read_attr(fd, header, &f_attr) < 0)

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);

		lseek(fd, tmp, SEEK_SET);

	symbol_conf.nr_events = nr_attrs;

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);

		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))

		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);

	perf_header__process_sections(header, fd, NULL,
				      perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	if (perf_evlist__set_tracepoint_names(session->evlist))
		goto out_delete_evlist;

	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process)

	union perf_event *ev;

	size  = sizeof(struct perf_event_attr);
	size  = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(tool, ev, NULL, NULL);

int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_session *session,
				 perf_event__handler_t process)

	struct perf_evsel *attr;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,

			pr_debug("failed to create perf header attribute\n");

int perf_event__process_attr(union perf_event *event,
			     struct perf_evlist **pevlist)

	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new(NULL, NULL);

	evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
int perf_event__synthesize_event_type(struct perf_tool *tool,
				      u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct machine *machine)

	union perf_event ev;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(ev.event_type.event_type.name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(tool, &ev, NULL, machine);

int perf_event__synthesize_event_types(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)

	struct perf_trace_event_type *type;

	for (i = 0; i < event_count; i++) {

		err = perf_event__synthesize_event_type(tool, type->event_id,
							type->name, process,

			pr_debug("failed to create perf header event type\n");

int perf_event__process_event_type(struct perf_tool *tool __unused,
				   union perf_event *event)

	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)

	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;

	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	write_padded(fd, NULL, 0, padding);

	return aligned_size;

int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)

	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)

	union perf_event ev;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

int perf_event__process_build_id(struct perf_tool *tool __used,
				 union perf_event *event,
				 struct perf_session *session)

	__event_process_build_id(&event->build_id,
				 event->build_id.filename,

void disable_buildid_cache(void)

	no_buildid_cache = true;