#define _FILE_OFFSET_BITS 64

#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

#include "trace-event.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

static u32 header_argc;
static const char **header_argv;

int perf_header__push_event(u64 id, const char *name)
        if (strlen(name) > MAX_EVENT_NAME)
                pr_warning("Event %s will be truncated\n", name);

        events = malloc(sizeof(struct perf_trace_event_type));

        struct perf_trace_event_type *nevents;

        nevents = realloc(events, (event_count + 1) * sizeof(*events));

        memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
        events[event_count].event_id = id;
        strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);

char *perf_header__find_event(u64 id)
        for (i = 0 ; i < event_count ; i++) {
                if (events[i].event_id == id)
                        return events[i].name;

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC (*(u64 *)__perf_magic)

struct perf_file_attr {
        struct perf_event_attr  attr;
        struct perf_file_section ids;
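/*
 * Note (annotation, not from the original source): PERF_MAGIC simply
 * reinterprets the first 8 bytes of the "PERFFILE" string as a u64.
 * perf_file_header__read() below compares the stored magic against these raw
 * bytes with memcmp(), so the magic check itself is independent of host byte
 * order; endianness of the rest of the header is detected from attr_size
 * instead.
 */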
void perf_header__set_feat(struct perf_header *header, int feat)
        set_bit(feat, header->adds_features);

void perf_header__clear_feat(struct perf_header *header, int feat)
        clear_bit(feat, header->adds_features);

bool perf_header__has_feat(const struct perf_header *header, int feat)
        return test_bit(feat, header->adds_features);

static int do_write(int fd, const void *buf, size_t size)
        int ret = write(fd, buf, size);

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
                        size_t count_aligned)
        static const char zero_buf[NAME_ALIGN];
        int err = do_write(fd, bf, count);

        err = do_write(fd, zero_buf, count_aligned - count);
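/*
 * Illustration (hypothetical numbers, not from the original source): with
 * NAME_ALIGN = 64, writing a 20-byte name with count_aligned = 64 means
 * do_write()ing the 20 bytes followed by 44 bytes from zero_buf, so the next
 * record starts on a 64-byte boundary.
 */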
static int do_write_string(int fd, const char *str)
        olen = strlen(str) + 1;
        len = ALIGN(olen, NAME_ALIGN);

        /* write len, incl. \0 */
        ret = do_write(fd, &len, sizeof(len));

        return write_padded(fd, str, olen, len);
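/*
 * On-disk string encoding used throughout the feature sections, as written
 * above: a 32-bit length (strlen + 1, rounded up to NAME_ALIGN) followed by
 * the NUL-terminated string itself, zero-padded to that length.
 * do_read_string() below is the matching reader.
 */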
static char *do_read_string(int fd, struct perf_header *ph)
        sz = read(fd, &len, sizeof(len));
        if (sz < (ssize_t)sizeof(len))

        ret = read(fd, buf, len);
        if (ret == (ssize_t)len) {
                /*
                 * strings are padded by zeroes
                 * thus the actual strlen of buf
                 * may be less than len
                 */

int
perf_header__set_cmdline(int argc, const char **argv)
        header_argc = (u32)argc;

        /* do not include NULL termination */
        header_argv = calloc(argc, sizeof(char *));

        /*
         * must copy argv contents because it gets moved
         * around during option parsing
         */
        for (i = 0; i < argc; i++)
                header_argv[i] = argv[i];

#define dsos__for_each_with_build_id(pos, head)        \
        list_for_each_entry(pos, head, node)            \
                if (!pos->has_build_id)                 \

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
                                       u16 misc, int fd)
        dsos__for_each_with_build_id(pos, head) {
                struct build_id_event b;

                len = pos->long_name_len + 1;
                len = ALIGN(len, NAME_ALIGN);
                memset(&b, 0, sizeof(b));
                memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
                b.header.misc = misc;
                b.header.size = sizeof(b) + len;
                err = do_write(fd, &b, sizeof(b));
                err = write_padded(fd, pos->long_name,
                                   pos->long_name_len + 1, len);
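/*
 * Each record emitted above is a struct build_id_event (perf_event_header,
 * pid, build_id) immediately followed by the DSO's long_name, zero-padded to
 * a NAME_ALIGN boundary; b.header.size covers both parts, so a reader can
 * walk the table by advancing its offset by header.size (see
 * perf_header__read_build_ids()).
 */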
static int machine__write_buildid_table(struct machine *machine, int fd)
        u16 kmisc = PERF_RECORD_MISC_KERNEL,
            umisc = PERF_RECORD_MISC_USER;

        if (!machine__is_host(machine)) {
                kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
                umisc = PERF_RECORD_MISC_GUEST_USER;

        err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
                                          kmisc, fd);
        err = __dsos__write_buildid_table(&machine->user_dsos,
                                          machine->pid, umisc, fd);

static int dsos__write_buildid_table(struct perf_header *header, int fd)
        struct perf_session *session = container_of(header,
                        struct perf_session, header);
        int err = machine__write_buildid_table(&session->host_machine, fd);

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                err = machine__write_buildid_table(pos, fd);

int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms)
        const size_t size = PATH_MAX;
        char *realname, *filename = zalloc(size),
             *linkname = zalloc(size), *targetname;

        if (symbol_conf.kptr_restrict) {
                pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");

        realname = (char *) name;

        realname = realpath(name, NULL);

        if (realname == NULL || filename == NULL || linkname == NULL)

        len = scnprintf(filename, size, "%s%s%s",
                        debugdir, is_kallsyms ? "/" : "", realname);
        if (mkdir_p(filename, 0755))

        snprintf(filename + len, size - len, "/%s", sbuild_id);

        if (access(filename, F_OK)) {
                if (copyfile("/proc/kallsyms", filename))
        } else if (link(realname, filename) && copyfile(name, filename))

        len = scnprintf(linkname, size, "%s/.build-id/%.2s",
                        debugdir, sbuild_id);

        if (access(linkname, X_OK) && mkdir_p(linkname, 0755))

        snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
        targetname = filename + strlen(debugdir) - 5;
        memcpy(targetname, "../..", 5);

        if (symlink(targetname, linkname) == 0)
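/*
 * Resulting cache layout (illustrative paths, assuming the default debugdir
 * of ~/.debug and an sbuild_id of "abcdef..."):
 *
 *   ~/.debug/<name>/abcdef...                     the cached copy itself
 *   ~/.debug/.build-id/ab/cdef... -> ../../<name>/abcdef...
 *
 * i.e. the first two hex digits become a directory level and the symlink is
 * made relative by aliasing "../.." over the debugdir prefix of filename.
 */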
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
                                 const char *name, const char *debugdir,
                                 bool is_kallsyms)
        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

        build_id__sprintf(build_id, build_id_size, sbuild_id);

        return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
        const size_t size = PATH_MAX;
        char *filename = zalloc(size),
             *linkname = zalloc(size);

        if (filename == NULL || linkname == NULL)

        snprintf(linkname, size, "%s/.build-id/%.2s/%s",
                 debugdir, sbuild_id, sbuild_id + 2);

        if (access(linkname, F_OK))

        if (readlink(linkname, filename, size - 1) < 0)

        if (unlink(linkname))

        /*
         * Since the link is relative, we must make it absolute:
         */
        snprintf(linkname, size, "%s/.build-id/%.2s/%s",
                 debugdir, sbuild_id, filename);

        if (unlink(linkname))

static int dso__cache_build_id(struct dso *dso, const char *debugdir)
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

        return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
                                     dso->long_name, debugdir, is_kallsyms);

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
        dsos__for_each_with_build_id(pos, head)
                if (dso__cache_build_id(pos, debugdir))

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
        int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
        ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);

static int perf_session__cache_build_ids(struct perf_session *session)
        char debugdir[PATH_MAX];

        snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

        if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)

        ret = machine__cache_build_ids(&session->host_machine, debugdir);

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__cache_build_ids(pos, debugdir);

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
        bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
        ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
        bool ret = machine__read_build_ids(&session->host_machine, with_hits);

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__read_build_ids(pos, with_hits);

static int write_trace_info(int fd, struct perf_header *h __used,
                            struct perf_evlist *evlist)
        return read_tracing_data(fd, &evlist->entries);

static int write_build_id(int fd, struct perf_header *h,
                          struct perf_evlist *evlist __used)
        struct perf_session *session;

        session = container_of(h, struct perf_session, header);

        if (!perf_session__read_build_ids(session, true))

        err = dsos__write_buildid_table(h, fd);
                pr_debug("failed to write buildid table\n");

        if (!no_buildid_cache)
                perf_session__cache_build_ids(session);

static int write_hostname(int fd, struct perf_header *h __used,
                          struct perf_evlist *evlist __used)
        return do_write_string(fd, uts.nodename);

static int write_osrelease(int fd, struct perf_header *h __used,
                           struct perf_evlist *evlist __used)
        return do_write_string(fd, uts.release);

static int write_arch(int fd, struct perf_header *h __used,
                      struct perf_evlist *evlist __used)
        return do_write_string(fd, uts.machine);

static int write_version(int fd, struct perf_header *h __used,
                         struct perf_evlist *evlist __used)
        return do_write_string(fd, perf_version_string);

static int write_cpudesc(int fd, struct perf_header *h __used,
                         struct perf_evlist *evlist __used)
#define CPUINFO_PROC NULL
        const char *search = CPUINFO_PROC;

        file = fopen("/proc/cpuinfo", "r");

        while (getline(&buf, &len, file) > 0) {
                ret = strncmp(buf, search, strlen(search));

        p = strchr(buf, ':');
        if (p && *(p+1) == ' ' && *(p+2))

        /* squash extra space characters (branding string) */
        while (*q && isspace(*q))
        while ((*r++ = *q++));

        ret = do_write_string(fd, s);
, struct perf_header
*h __used
,
568 struct perf_evlist
*evlist __used
)
574 nr
= sysconf(_SC_NPROCESSORS_CONF
);
578 nrc
= (u32
)(nr
& UINT_MAX
);
580 nr
= sysconf(_SC_NPROCESSORS_ONLN
);
584 nra
= (u32
)(nr
& UINT_MAX
);
586 ret
= do_write(fd
, &nrc
, sizeof(nrc
));
590 return do_write(fd
, &nra
, sizeof(nra
));
593 static int write_event_desc(int fd
, struct perf_header
*h __used
,
594 struct perf_evlist
*evlist
)
596 struct perf_evsel
*attr
;
597 u32 nre
= 0, nri
, sz
;
600 list_for_each_entry(attr
, &evlist
->entries
, node
)
604 * write number of events
606 ret
= do_write(fd
, &nre
, sizeof(nre
));
611 * size of perf_event_attr struct
613 sz
= (u32
)sizeof(attr
->attr
);
614 ret
= do_write(fd
, &sz
, sizeof(sz
));
618 list_for_each_entry(attr
, &evlist
->entries
, node
) {
620 ret
= do_write(fd
, &attr
->attr
, sz
);
624 * write number of unique id per event
625 * there is one id per instance of an event
627 * copy into an nri to be independent of the
631 ret
= do_write(fd
, &nri
, sizeof(nri
));
636 * write event string as passed on cmdline
638 ret
= do_write_string(fd
, event_name(attr
));
642 * write unique ids for this event
644 ret
= do_write(fd
, attr
->id
, attr
->ids
* sizeof(u64
));
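/*
 * Rough layout of the HEADER_EVENT_DESC section as written above:
 *
 *   u32 nre;                          number of events
 *   u32 sz;                           sizeof(struct perf_event_attr)
 *   for each event:
 *       struct perf_event_attr attr;
 *       u32 nri;                      number of ids for this event
 *       string name;                  do_write_string() format
 *       u64 ids[nri];
 */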
static int write_cmdline(int fd, struct perf_header *h __used,
                         struct perf_evlist *evlist __used)
        char buf[MAXPATHLEN];

        /*
         * actual path to perf binary
         */
        sprintf(proc, "/proc/%d/exe", getpid());
        ret = readlink(proc, buf, sizeof(buf));

        /* readlink() does not add null termination */

        /* account for binary path */

        ret = do_write(fd, &n, sizeof(n));

        ret = do_write_string(fd, buf);

        for (i = 0 ; i < header_argc; i++) {
                ret = do_write_string(fd, header_argv[i]);
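/*
 * HEADER_CMDLINE is therefore a count followed by strings: the resolved
 * /proc/<pid>/exe path of the perf binary first, then the argv[] words saved
 * by perf_header__set_cmdline(), each in do_write_string() format.
 */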
#define CORE_SIB_FMT \
        "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
        "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

        char **core_siblings;
        char **thread_siblings;

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
        char filename[MAXPATHLEN];
        char *buf = NULL, *p;

        sprintf(filename, CORE_SIB_FMT, cpu);
        fp = fopen(filename, "r");

        if (getline(&buf, &len, fp) <= 0)

        p = strchr(buf, '\n');

        for (i = 0; i < tp->core_sib; i++) {
                if (!strcmp(buf, tp->core_siblings[i]))

        if (i == tp->core_sib) {
                tp->core_siblings[i] = buf;

        sprintf(filename, THRD_SIB_FMT, cpu);
        fp = fopen(filename, "r");

        if (getline(&buf, &len, fp) <= 0)

        p = strchr(buf, '\n');

        for (i = 0; i < tp->thread_sib; i++) {
                if (!strcmp(buf, tp->thread_siblings[i]))

        if (i == tp->thread_sib) {
                tp->thread_siblings[i] = buf;

static void free_cpu_topo(struct cpu_topo *tp)
        for (i = 0 ; i < tp->core_sib; i++)
                free(tp->core_siblings[i]);

        for (i = 0 ; i < tp->thread_sib; i++)
                free(tp->thread_siblings[i]);

static struct cpu_topo *build_cpu_topology(void)
        ncpus = sysconf(_SC_NPROCESSORS_CONF);

        nr = (u32)(ncpus & UINT_MAX);

        sz = nr * sizeof(char *);

        addr = calloc(1, sizeof(*tp) + 2 * sz);

        tp->core_siblings = addr;

        tp->thread_siblings = addr;

        for (i = 0; i < nr; i++) {
                ret = build_cpu_topo(tp, i);

static int write_cpu_topology(int fd, struct perf_header *h __used,
                              struct perf_evlist *evlist __used)
        tp = build_cpu_topology();

        ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));

        for (i = 0; i < tp->core_sib; i++) {
                ret = do_write_string(fd, tp->core_siblings[i]);

        ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));

        for (i = 0; i < tp->thread_sib; i++) {
                ret = do_write_string(fd, tp->thread_siblings[i]);
static int write_total_mem(int fd, struct perf_header *h __used,
                           struct perf_evlist *evlist __used)
        fp = fopen("/proc/meminfo", "r");

        while (getline(&buf, &len, fp) > 0) {
                ret = strncmp(buf, "MemTotal:", 9);

        n = sscanf(buf, "%*s %"PRIu64, &mem);

        ret = do_write(fd, &mem, sizeof(mem));

static int write_topo_node(int fd, int node)
        char str[MAXPATHLEN];
        char *buf = NULL, *p;
        u64 mem_total, mem_free, mem;

        sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
        fp = fopen(str, "r");

        while (getline(&buf, &len, fp) > 0) {
                /* skip over invalid lines */
                if (!strchr(buf, ':'))
                if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
                if (!strcmp(field, "MemTotal:"))
                if (!strcmp(field, "MemFree:"))

        ret = do_write(fd, &mem_total, sizeof(u64));

        ret = do_write(fd, &mem_free, sizeof(u64));

        sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

        fp = fopen(str, "r");

        if (getline(&buf, &len, fp) <= 0)

        p = strchr(buf, '\n');

        ret = do_write_string(fd, buf);

static int write_numa_topology(int fd, struct perf_header *h __used,
                               struct perf_evlist *evlist __used)
        struct cpu_map *node_map = NULL;

        fp = fopen("/sys/devices/system/node/online", "r");

        if (getline(&buf, &len, fp) <= 0)

        c = strchr(buf, '\n');

        node_map = cpu_map__new(buf);

        nr = (u32)node_map->nr;

        ret = do_write(fd, &nr, sizeof(nr));

        for (i = 0; i < nr; i++) {
                j = (u32)node_map->map[i];
                ret = do_write(fd, &j, sizeof(j));

                ret = write_topo_node(fd, i);
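/*
 * HEADER_NUMA_TOPOLOGY layout: a u32 node count, then per node the u32 node
 * number, MemTotal and MemFree (in kB, as u64) taken from the node's meminfo,
 * and the node's cpulist as a string (see write_topo_node() above).
 */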
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)

static int write_cpuid(int fd, struct perf_header *h __used,
                       struct perf_evlist *evlist __used)
        ret = get_cpuid(buffer, sizeof(buffer));

        return do_write_string(fd, buffer);

static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# hostname : %s\n", str);

static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# os release : %s\n", str);

static void print_arch(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# arch : %s\n", str);

static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# cpudesc : %s\n", str);

static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                nr = -1; /* interpreted as error */

        fprintf(fp, "# nrcpus online : %u\n", nr);

        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                nr = -1; /* interpreted as error */

        fprintf(fp, "# nrcpus avail : %u\n", nr);

static void print_version(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# perf version : %s\n", str);

static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))

        fprintf(fp, "# cmdline : ");

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "%s ", str);

static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "# sibling cores : %s\n", str);

        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "# sibling threads : %s\n", str);

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
        struct perf_event_attr attr;
        u32 nre, sz, nr, i, j, msz;

        /* number of events */
        ret = read(fd, &nre, sizeof(nre));
        if (ret != (ssize_t)sizeof(nre))

                nre = bswap_32(nre);

        ret = read(fd, &sz, sizeof(sz));
        if (ret != (ssize_t)sizeof(sz))

        /*
         * ensure it is at least to our ABI rev
         */
        if (sz < (u32)sizeof(attr))

        memset(&attr, 0, sizeof(attr));

        /* read entire region to sync up to next field */

        for (i = 0 ; i < nre; i++) {

                ret = read(fd, buf, sz);
                if (ret != (ssize_t)sz)

                        perf_event__attr_swap(buf);

                memcpy(&attr, buf, msz);

                ret = read(fd, &nr, sizeof(nr));
                if (ret != (ssize_t)sizeof(nr))

                str = do_read_string(fd, ph);
                fprintf(fp, "# event : name = %s, ", str);

                fprintf(fp, "type = %d, config = 0x%"PRIx64
                            ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
                        attr.type, attr.config, attr.config1, attr.config2);

                fprintf(fp, ", excl_usr = %d, excl_kern = %d",
                        attr.exclude_user,
                        attr.exclude_kernel);

                fprintf(fp, ", id = {");

                for (j = 0 ; j < nr; j++) {
                        ret = read(fd, &id, sizeof(id));
                        if (ret != (ssize_t)sizeof(id))

                        fprintf(fp, " %"PRIu64, id);

        fprintf(fp, "# event desc: not available or unable to read\n");

static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
        ret = read(fd, &mem, sizeof(mem));
        if (ret != sizeof(mem))

                mem = bswap_64(mem);

        fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);

        fprintf(fp, "# total memory : unknown\n");

static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
        uint64_t mem_total, mem_free;

        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))

        for (i = 0; i < nr; i++) {

                ret = read(fd, &c, sizeof(c));
                if (ret != (ssize_t)sizeof(c))

                ret = read(fd, &mem_total, sizeof(u64));
                if (ret != sizeof(u64))

                ret = read(fd, &mem_free, sizeof(u64));
                if (ret != sizeof(u64))

                if (h->needs_swap) {
                        mem_total = bswap_64(mem_total);
                        mem_free = bswap_64(mem_free);

                fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
                            " free = %"PRIu64" kB\n",
                        c, mem_total, mem_free);

                str = do_read_string(fd, h);
                fprintf(fp, "# node%u cpu list : %s\n", c, str);

        fprintf(fp, "# numa topology : not available\n");

static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
        char *str = do_read_string(fd, ph);
        fprintf(fp, "# cpuid : %s\n", str);

struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);

#define FEAT_OPA(n, func) \
        [n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPF(n, func) \
        [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_trace_info NULL
#define print_build_id   NULL

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPA(HEADER_TRACE_INFO, trace_info),
        FEAT_OPA(HEADER_BUILD_ID, build_id),
        FEAT_OPA(HEADER_HOSTNAME, hostname),
        FEAT_OPA(HEADER_OSRELEASE, osrelease),
        FEAT_OPA(HEADER_VERSION, version),
        FEAT_OPA(HEADER_ARCH, arch),
        FEAT_OPA(HEADER_NRCPUS, nrcpus),
        FEAT_OPA(HEADER_CPUDESC, cpudesc),
        FEAT_OPA(HEADER_CPUID, cpuid),
        FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
        FEAT_OPA(HEADER_EVENT_DESC, event_desc),
        FEAT_OPA(HEADER_CMDLINE, cmdline),
        FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
        FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
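/*
 * Annotation: FEAT_OPA entries are always printed by
 * perf_file_section__fprintf_info(); FEAT_OPF entries set .full_only, so
 * their print routine only runs for the full listing and otherwise just a
 * "use -I to display" hint is emitted.
 */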
struct header_print_data {
        bool full; /* extended list of headers */

static int perf_file_section__fprintf_info(struct perf_file_section *section,
                                           struct perf_header *ph,
                                           int feat, int fd, void *data)
        struct header_print_data *hd = data;

        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                         "%d, continuing...\n", section->offset, feat);

        if (feat >= HEADER_LAST_FEATURE) {
                pr_warning("unknown feature %d\n", feat);

        if (!feat_ops[feat].print)

        if (!feat_ops[feat].full_only || hd->full)
                feat_ops[feat].print(ph, fd, hd->fp);
        else
                fprintf(hd->fp, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
        struct header_print_data hd;
        struct perf_header *header = &session->header;
        int fd = session->fd;

        perf_header__process_sections(header, fd, &hd,
                                      perf_file_section__fprintf_info);

static int do_write_feat(int fd, struct perf_header *h, int type,
                         struct perf_file_section **p,
                         struct perf_evlist *evlist)
        if (perf_header__has_feat(h, type)) {
                if (!feat_ops[type].write)

                (*p)->offset = lseek(fd, 0, SEEK_CUR);

                err = feat_ops[type].write(fd, h, evlist);
                        pr_debug("failed to write feature %d\n", type);

                        /* undo anything written */
                        lseek(fd, (*p)->offset, SEEK_SET);

                (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;

static int perf_header__adds_write(struct perf_header *header,
                                   struct perf_evlist *evlist, int fd)
        struct perf_file_section *feat_sec, *p;

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

        feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
        if (feat_sec == NULL)

        sec_size = sizeof(*feat_sec) * nr_sections;

        sec_start = header->data_offset + header->data_size;
        lseek(fd, sec_start + sec_size, SEEK_SET);

        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
                if (do_write_feat(fd, header, feat, &p, evlist))
                        perf_header__clear_feat(header, feat);

        lseek(fd, sec_start, SEEK_SET);
        /*
         * may write more than needed due to dropped feature, but
         * this is okay, reader will skip the missing entries
         */
        err = do_write(fd, feat_sec, sec_size);
                pr_debug("failed to write feature section\n");
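/*
 * On disk, the feature area sits right after the sample data: a table of one
 * struct perf_file_section { offset, size } per set feature bit (space
 * reserved up front by the lseek above), immediately followed by the
 * per-feature payloads written through do_write_feat(); the table itself is
 * filled in last by seeking back to sec_start.
 */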
int perf_header__write_pipe(int fd)
        struct perf_pipe_file_header f_header;

        f_header = (struct perf_pipe_file_header){
                .magic     = PERF_MAGIC,
                .size      = sizeof(f_header),

        err = do_write(fd, &f_header, sizeof(f_header));
                pr_debug("failed to write perf pipe header\n");

int perf_session__write_header(struct perf_session *session,
                               struct perf_evlist *evlist,
                               int fd, bool at_exit)
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        struct perf_header *header = &session->header;
        struct perf_evsel *attr, *pair = NULL;

        lseek(fd, sizeof(f_header), SEEK_SET);

        if (session->evlist != evlist)
                pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(attr, &evlist->entries, node) {
                attr->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(fd, attr->id, attr->ids * sizeof(u64));
                        pr_debug("failed to write perf header\n");
                if (session->evlist != evlist) {
                        err = do_write(fd, pair->id, pair->ids * sizeof(u64));
                        attr->ids += pair->ids;
                        pair = list_entry(pair->node.next, struct perf_evsel, node);

        header->attr_offset = lseek(fd, 0, SEEK_CUR);

        list_for_each_entry(attr, &evlist->entries, node) {
                f_attr = (struct perf_file_attr){
                                .offset = attr->id_offset,
                                .size   = attr->ids * sizeof(u64),

                err = do_write(fd, &f_attr, sizeof(f_attr));
                        pr_debug("failed to write perf header attribute\n");

        header->event_offset = lseek(fd, 0, SEEK_CUR);
        header->event_size = event_count * sizeof(struct perf_trace_event_type);
        err = do_write(fd, events, header->event_size);
                pr_debug("failed to write perf header events\n");

        header->data_offset = lseek(fd, 0, SEEK_CUR);

        err = perf_header__adds_write(header, evlist, fd);

        f_header = (struct perf_file_header){
                .magic     = PERF_MAGIC,
                .size      = sizeof(f_header),
                .attr_size = sizeof(f_attr),
                        .offset = header->attr_offset,
                        .size   = evlist->nr_entries * sizeof(f_attr),
                        .offset = header->data_offset,
                        .size   = header->data_size,
                        .offset = header->event_offset,
                        .size   = header->event_size,

        memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

        lseek(fd, 0, SEEK_SET);
        err = do_write(fd, &f_header, sizeof(f_header));
                pr_debug("failed to write perf header\n");

        lseek(fd, header->data_offset + header->data_size, SEEK_SET);
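/*
 * Overall perf.data layout produced above (non-pipe mode), roughly:
 *
 *   struct perf_file_header          written last, at offset 0
 *   per-event id arrays              u64 ids, pointed to by each f_attr
 *   attribute table                  f_header.attrs: one perf_file_attr each
 *   event_types table                struct perf_trace_event_type entries
 *   data                             f_header.data: the sample records
 *   feature table + payloads         see perf_header__adds_write()
 */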
static int perf_header__getbuffer64(struct perf_header *header,
                                    int fd, void *buf, size_t size)
        if (readn(fd, buf, size) <= 0)

        if (header->needs_swap)
                mem_bswap_64(buf, size);

int perf_header__process_sections(struct perf_header *header, int fd,
                                  void *data,
                                  int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd, void *data))
        struct perf_file_section *feat_sec, *sec;

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);

        feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);

        sec_size = sizeof(*feat_sec) * nr_sections;

        lseek(fd, header->data_offset + header->data_size, SEEK_SET);

        err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);

        for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
                err = process(sec++, header, feat, fd, data);

int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd)
        lseek(fd, 0, SEEK_SET);

        if (readn(fd, header, sizeof(*header)) <= 0 ||
            memcmp(&header->magic, __perf_magic, sizeof(header->magic)))

        if (header->attr_size != sizeof(struct perf_file_attr)) {
                u64 attr_size = bswap_64(header->attr_size);

                if (attr_size != sizeof(struct perf_file_attr))

                mem_bswap_64(header, offsetof(struct perf_file_header,
                                              adds_features));
                ph->needs_swap = true;

        if (header->size != sizeof(*header)) {
                /* Support the previous format */
                if (header->size == offsetof(typeof(*header), adds_features))
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
        } else if (ph->needs_swap) {
                /*
                 * feature bitmap is declared as an array of unsigned longs --
                 * not good since its size can differ between the host that
                 * generated the data file and the host analyzing the file.
                 *
                 * We need to handle endianness, but we don't know the size of
                 * the unsigned long where the file was generated. Take a best
                 * guess at determining it: try 64-bit swap first (ie., file
                 * created on a 64-bit host), and check if the hostname feature
                 * bit is set (this feature bit is forced on as of fbe96f2).
                 * If the bit is not, undo the 64-bit swap and try a 32-bit
                 * swap. If the hostname bit is still not set (e.g., older data
                 * file), punt and fallback to the original behavior --
                 * clearing all feature bits and setting buildid.
                 */
                for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
                        header->adds_features[i] = bswap_64(header->adds_features[i]);

                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
                                header->adds_features[i] = bswap_64(header->adds_features[i]);
                                header->adds_features[i] = bswap_32(header->adds_features[i]);

                if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
                        set_bit(HEADER_BUILD_ID, header->adds_features);

        memcpy(&ph->adds_features, &header->adds_features,
               sizeof(ph->adds_features));

        ph->event_offset = header->event_types.offset;
        ph->event_size   = header->event_types.size;
        ph->data_offset  = header->data.offset;
        ph->data_size    = header->data.size;
static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
        struct list_head *head;
        struct machine *machine;
        enum dso_kernel_type dso_type;

        machine = perf_session__findnew_machine(session, bev->pid);

        misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        case PERF_RECORD_MISC_KERNEL:
                dso_type = DSO_TYPE_KERNEL;
                head = &machine->kernel_dsos;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                dso_type = DSO_TYPE_GUEST_KERNEL;
                head = &machine->kernel_dsos;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
                dso_type = DSO_TYPE_USER;
                head = &machine->user_dsos;

        dso = __dsos__findnew(head, filename);
                char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                dso__set_build_id(dso, &bev->build_id);

                if (filename[0] == '[')
                        dso->kernel = dso_type;

                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
                pr_debug("build id event received for %s: %s\n",
                         dso->long_name, sbuild_id);

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
                                                 int input, u64 offset, u64 size)
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct {
                struct perf_event_header header;
                u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        } old_bev;
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;

        while (offset < limit) {

                if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))

                if (header->needs_swap)
                        perf_event_header__bswap(&old_bev.header);

                len = old_bev.header.size - sizeof(old_bev);
                if (read(input, filename, len) != len)

                bev.header = old_bev.header;

                /*
                 * As the pid is the missing value, we need to fill
                 * it properly. The header.misc value gives us a nice hint.
                 */
                bev.pid = HOST_KERNEL_ID;
                if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
                    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
                        bev.pid = DEFAULT_GUEST_KERNEL_ID;

                memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;

static int perf_header__read_build_ids(struct perf_header *header,
                                       int input, u64 offset, u64 size)
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size, orig_offset = offset;

        while (offset < limit) {

                if (read(input, &bev, sizeof(bev)) != sizeof(bev))

                if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);

                len = bev.header.size - sizeof(bev);
                if (read(input, filename, len) != len)

                /*
                 * The a1645ce1 changeset:
                 *
                 * "perf: 'perf kvm' tool for monitoring guest performance from host"
                 *
                 * Added a field to struct build_id_event that broke the file
                 * format.
                 *
                 * Since the kernel build-id is the first entry, process the
                 * table using the old format if the well known
                 * '[kernel.kallsyms]' string for the kernel build-id has the
                 * first 4 characters chopped off (where the pid_t sits).
                 */
                if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
                        if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
                        return perf_header__read_build_ids_abi_quirk(header, input, offset, size);

                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
                                      int feat, int fd, void *data __used)
        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                         "%d, continuing...\n", section->offset, feat);

        if (feat >= HEADER_LAST_FEATURE) {
                pr_debug("unknown feature %d, continuing...\n", feat);

        case HEADER_TRACE_INFO:
                trace_report(fd, false);
        case HEADER_BUILD_ID:
                if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
                        pr_debug("Failed to read buildids, continuing...\n");

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
                                       struct perf_header *ph, int fd,
                                       bool repipe)
        if (readn(fd, header, sizeof(*header)) <= 0 ||
            memcmp(&header->magic, __perf_magic, sizeof(header->magic)))

        if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)

        if (header->size != sizeof(*header)) {
                u64 size = bswap_64(header->size);

                if (size != sizeof(*header))

                ph->needs_swap = true;

static int perf_header__read_pipe(struct perf_session *session, int fd)
        struct perf_header *header = &session->header;
        struct perf_pipe_file_header f_header;

        if (perf_file_header__read_pipe(&f_header, header, fd,
                                        session->repipe) < 0) {
                pr_debug("incompatible file format\n");

int perf_session__read_header(struct perf_session *session, int fd)
        struct perf_header *header = &session->header;
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        int nr_attrs, nr_ids, i, j;

        session->evlist = perf_evlist__new(NULL, NULL);
        if (session->evlist == NULL)

        if (session->fd_pipe)
                return perf_header__read_pipe(session, fd);

        if (perf_file_header__read(&f_header, header, fd) < 0) {
                pr_debug("incompatible file format\n");

        nr_attrs = f_header.attrs.size / sizeof(f_attr);
        lseek(fd, f_header.attrs.offset, SEEK_SET);

        for (i = 0; i < nr_attrs; i++) {
                struct perf_evsel *evsel;

                if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)

                if (header->needs_swap)
                        perf_event__attr_swap(&f_attr.attr);

                tmp = lseek(fd, 0, SEEK_CUR);
                evsel = perf_evsel__new(&f_attr.attr, i);
                        goto out_delete_evlist;
                /*
                 * Do it before so that if perf_evsel__alloc_id fails, this
                 * entry gets purged too at perf_evlist__delete().
                 */
                perf_evlist__add(session->evlist, evsel);

                nr_ids = f_attr.ids.size / sizeof(u64);
                /*
                 * We don't have the cpu and thread maps on the header, so
                 * for allocating the perf_sample_id table we fake 1 cpu and
                 * hattr->ids threads.
                 */
                if (perf_evsel__alloc_id(evsel, 1, nr_ids))
                        goto out_delete_evlist;

                lseek(fd, f_attr.ids.offset, SEEK_SET);

                for (j = 0; j < nr_ids; j++) {
                        if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))

                        perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);

                lseek(fd, tmp, SEEK_SET);

        symbol_conf.nr_events = nr_attrs;

        if (f_header.event_types.size) {
                lseek(fd, f_header.event_types.offset, SEEK_SET);
                events = malloc(f_header.event_types.size);
                if (perf_header__getbuffer64(header, fd, events,
                                             f_header.event_types.size))
                event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);

        perf_header__process_sections(header, fd, NULL,
                                      perf_file_section__process);

        lseek(fd, header->data_offset, SEEK_SET);

out_delete_evlist:
        perf_evlist__delete(session->evlist);
        session->evlist = NULL;
int perf_event__synthesize_attr(struct perf_tool *tool,
                                struct perf_event_attr *attr, u16 ids, u64 *id,
                                perf_event__handler_t process)
        union perf_event *ev;

        size  = sizeof(struct perf_event_attr);
        size  = ALIGN(size, sizeof(u64));
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);

        ev->attr.attr = *attr;
        memcpy(ev->attr.id, id, ids * sizeof(u64));

        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
        ev->attr.header.size = size;

        err = process(tool, ev, NULL, NULL);

int perf_event__synthesize_attrs(struct perf_tool *tool,
                                 struct perf_session *session,
                                 perf_event__handler_t process)
        struct perf_evsel *attr;

        list_for_each_entry(attr, &session->evlist->entries, node) {
                err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
                                                  attr->id, process);
                        pr_debug("failed to create perf header attribute\n");

int perf_event__process_attr(union perf_event *event,
                             struct perf_evlist **pevlist)
        unsigned int i, ids, n_ids;
        struct perf_evsel *evsel;
        struct perf_evlist *evlist = *pevlist;

        if (evlist == NULL) {
                *pevlist = evlist = perf_evlist__new(NULL, NULL);

        evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);

        perf_evlist__add(evlist, evsel);

        ids = event->header.size;
        ids -= (void *)&event->attr.id - (void *)event;
        n_ids = ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
         * hattr->ids threads.
         */
        if (perf_evsel__alloc_id(evsel, 1, n_ids))

        for (i = 0; i < n_ids; i++) {
                perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);

int perf_event__synthesize_event_type(struct perf_tool *tool,
                                      u64 event_id, char *name,
                                      perf_event__handler_t process,
                                      struct machine *machine)
        union perf_event ev;

        memset(&ev, 0, sizeof(ev));

        ev.event_type.event_type.event_id = event_id;
        memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
        strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

        ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
        size = strlen(ev.event_type.event_type.name);
        size = ALIGN(size, sizeof(u64));
        ev.event_type.header.size = sizeof(ev.event_type) -
                (sizeof(ev.event_type.event_type.name) - size);

        err = process(tool, &ev, NULL, machine);

int perf_event__synthesize_event_types(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
        struct perf_trace_event_type *type;

        for (i = 0; i < event_count; i++) {

                err = perf_event__synthesize_event_type(tool, type->event_id,
                                                        type->name, process,
                                                        machine);
                        pr_debug("failed to create perf header event type\n");

int perf_event__process_event_type(struct perf_tool *tool __unused,
                                   union perf_event *event)
        if (perf_header__push_event(event->event_type.event_type.event_id,
                                    event->event_type.event_type.name) < 0)
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
                                        struct perf_evlist *evlist,
                                        perf_event__handler_t process)
        union perf_event ev;
        struct tracing_data *tdata;
        ssize_t size = 0, aligned_size = 0, padding;

        /*
         * We are going to store the size of the data followed
         * by the data contents. Since the fd descriptor is a pipe,
         * we cannot seek back to store the size of the data once
         * we know it. Instead we:
         *
         * - write the tracing data to the temp file
         * - get/write the data size to pipe
         * - write the tracing data from the temp file
         */
        tdata = tracing_data_get(&evlist->entries, fd, true);

        memset(&ev, 0, sizeof(ev));

        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;

        aligned_size = ALIGN(size, sizeof(u64));
        padding = aligned_size - size;
        ev.tracing_data.header.size = sizeof(ev.tracing_data);
        ev.tracing_data.size = aligned_size;

        process(tool, &ev, NULL, NULL);

        /*
         * The put function will copy all the tracing data
         * stored in temp file to the pipe.
         */
        tracing_data_put(tdata);

        write_padded(fd, NULL, 0, padding);

        return aligned_size;

int perf_event__process_tracing_data(union perf_event *event,
                                     struct perf_session *session)
        ssize_t size_read, padding, size = event->tracing_data.size;
        off_t offset = lseek(session->fd, 0, SEEK_CUR);

        /* setup for reading amidst mmap */
        lseek(session->fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);

        size_read = trace_report(session->fd, session->repipe);

        padding = ALIGN(size_read, sizeof(u64)) - size_read;

        if (read(session->fd, buf, padding) < 0)
                die("reading input file");
        if (session->repipe) {
                int retw = write(STDOUT_FILENO, buf, padding);
                if (retw <= 0 || retw != padding)
                        die("repiping tracing data padding");

        if (size_read + padding != size)
                die("tracing data size mismatch");

        return size_read + padding;

int perf_event__synthesize_build_id(struct perf_tool *tool,
                                    struct dso *pos, u16 misc,
                                    perf_event__handler_t process,
                                    struct machine *machine)
        union perf_event ev;

        memset(&ev, 0, sizeof(ev));

        len = pos->long_name_len + 1;
        len = ALIGN(len, NAME_ALIGN);
        memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
        ev.build_id.header.misc = misc;
        ev.build_id.pid = machine->pid;
        ev.build_id.header.size = sizeof(ev.build_id) + len;
        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

        err = process(tool, &ev, NULL, machine);

int perf_event__process_build_id(struct perf_tool *tool __used,
                                 union perf_event *event,
                                 struct perf_session *session)
        __event_process_build_id(&event->build_id,
                                 event->build_id.filename,
                                 session);

void disable_buildid_cache(void)
        no_buildid_cache = true;