// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "util/evsel_fprintf.h"
#include "trace-event.h"
#include <api/fs/fs.h>
#include "time-utils.h"
#include "util/util.h" // perf_exe()
#include "bpf-event.h"
#include <linux/ctype.h>
#include <internal/lib.h>
/*
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;
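
/*
 * Example (hypothetical helper, not used below): a reader that finds the
 * byte-swapped constant knows the file was produced on a machine of the
 * opposite endianness:
 *
 *	static bool perf_magic_is_swapped(u64 magic)
 *	{
 *		return magic == __perf_magic2_sw;
 *	}
 */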
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
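
/*
 * Usage sketch (hypothetical caller): record-like tools mark the features
 * they intend to emit before the header is written out, e.g.:
 *
 *	perf_header__set_feat(&session->header, HEADER_HOSTNAME);
 *	if (!have_auxtrace)
 *		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 */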
static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}
static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}
/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}
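
/*
 * Sketch of a minimal feature writer built on do_write(), assuming a
 * hypothetical HEADER_ANSWER feature; the real write_*() helpers below
 * all follow this shape:
 *
 *	static int write_answer(struct feat_fd *ff,
 *				struct evlist *evlist __maybe_unused)
 *	{
 *		u32 answer = 42;
 *
 *		return do_write(ff, &answer, sizeof(answer));
 *	}
 */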
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}
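
/*
 * Resulting on-disk layout for a string (padding granularity assumed to
 * be the NAME_ALIGN value used throughout this file):
 *
 *	u32  len;       // strlen(str) + 1, rounded up to NAME_ALIGN
 *	char str[len];  // NUL-terminated string followed by zero padding
 */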
static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}
static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}
static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}
static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}
static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}
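
/*
 * do_read_bitmap() is the exact inverse of do_write_bitmap() above: both
 * move a u64 bit count followed by BITS_TO_U64(size) raw u64 words, so a
 * bitmap round-trips unchanged (byte swapping is handled in do_read_u64()).
 */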
static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}
static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}
static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;

	if (uname(&uts) < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;

	if (uname(&uts) < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;

	if (uname(&uts) < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);

			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;

		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}
static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	u32 nrc, nra;
	int ret;
	long nr;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}
static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->core.ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
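
/*
 * Resulting EVENT_DESC layout, as emitted above:
 *
 *	u32 nre;                     // number of events
 *	u32 sz;                      // sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;
 *		u32  nr_ids;         // ids per event instance
 *		char name[];         // do_write_string() format
 *		u64  ids[nr_ids];
 *	} events[nre];
 */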
static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}
static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}

	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}
static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}
/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */
static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the expression could not be compiled. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return ENOSYS; /* Not implemented */
}
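
/*
 * Arch override sketch (hypothetical, modeled on the per-arch variants
 * under arch/ - the exact fields differ by architecture): fill `buffer`
 * with a stable identifier and return 0, e.g.
 *
 *	snprintf(buffer, sz, "%s,%u,%u,%u", vendor, family, model, step);
 */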
static int write_cpuid(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}
static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clockid_res_ns,
			sizeof(ff->ph->env.clockid_res_ns));
}

static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#else // HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
			       struct evlist *evlist __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
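
/*
 * Example output line, given a typical x86 L1 data-cache entry
 * (level = 1, type = "Data", size = "32K", map = "0-1"):
 *
 *	L1 Data                 32K [0-1]
 */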
#define MAX_CACHE_LVL 4

static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
{
	u32 i, cnt = 0;
	u32 nr, cpu;
	u16 level;

	nr = cpu__max_cpu();

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < MAX_CACHE_LVL; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);
		}
	}
	*cntp = cnt;
	return 0;
}
static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(ff, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}
static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}
static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}
static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			"failed to write MEM_TOPOLOGY, way too many nodes\n")) {
			closedir(dir);
			return -1;
		}

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}
#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY holds physical memory map for every
 * node in system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store map of physical indexes for
 * each node:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)
		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}
static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);

		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;

			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote + 1;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies    : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}
static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->core.id);
	}

	free(events);
}
static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->core.ids = nr;
		evsel->core.id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}
static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->core.ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}
static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}
static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}
static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}
static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "#  ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}
static void print_compressed(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}
static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->core.nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}
static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);
	fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}
static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr    = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}
static int __event_process_build_id(struct perf_record_header_build_id *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
*header
,
1993 int input
, u64 offset
, u64 size
)
1995 struct perf_session
*session
= container_of(header
, struct perf_session
, header
);
1996 struct perf_record_header_build_id bev
;
1997 char filename
[PATH_MAX
];
1998 u64 limit
= offset
+ size
, orig_offset
= offset
;
2001 while (offset
< limit
) {
2004 if (readn(input
, &bev
, sizeof(bev
)) != sizeof(bev
))
2007 if (header
->needs_swap
)
2008 perf_event_header__bswap(&bev
.header
);
2010 len
= bev
.header
.size
- sizeof(bev
);
2011 if (readn(input
, filename
, len
) != len
)
2014 * The a1645ce1 changeset:
2016 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2018 * Added a field to struct perf_record_header_build_id that broke the file
2021 * Since the kernel build-id is the first entry, process the
2022 * table using the old format if the well known
2023 * '[kernel.kallsyms]' string for the kernel build-id has the
2024 * first 4 characters chopped off (where the pid_t sits).
2026 if (memcmp(filename
, "nel.kallsyms]", 13) == 0) {
2027 if (lseek(input
, orig_offset
, SEEK_SET
) == (off_t
)-1)
2029 return perf_header__read_build_ids_abi_quirk(header
, input
, offset
, size
);
2032 __event_process_build_id(&bev
, filename
, session
);
2034 offset
+= bev
.header
.size
;
/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
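
/*
 * For reference, FEAT_PROCESS_STR_FUN(hostname, hostname) expands to:
 *
 *	static int process_hostname(struct feat_fd *ff,
 *				    void *data __maybe_unused)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */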
static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}
static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}
static struct evsel *
perf_evlist__find_by_index(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct evlist *evlist,
			    struct evsel *event)
{
	struct evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the numbers of cpus.
	 * The socket_id number might be higher than the numbers of cpus.
	 * This depends on the configuration.
	 * AArch64 is the same.
	 */
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			  || !strncmp(ph->env.arch, "aarch64", 7)))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
		size += sizeof(u32);
	}

	/*
	 * The header may be from old perf,
	 * which doesn't include die information.
	 */
	if (ff->size <= size)
		return 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_dies = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_dies = strbuf_detach(&sb, NULL);

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].die_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = perf_cpu_map__new(str);
		if (!n->map)
			goto error;

		free(str);
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

	ff->ph->env.nr_pmu_mappings = pmu_num;
	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (pmu_num) {
		if (do_read_u32(ff, &type))
			goto error;

		name = do_read_string(ff);
		if (!name)
			goto error;

		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
			ff->ph->env.msr_pmu_type = type;

		free(name);
		pmu_num--;
	}
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
*ff
, void *data __maybe_unused
)
2425 u32 i
, nr
, nr_groups
;
2426 struct perf_session
*session
;
2427 struct evsel
*evsel
, *leader
= NULL
;
2434 if (do_read_u32(ff
, &nr_groups
))
2437 ff
->ph
->env
.nr_groups
= nr_groups
;
2439 pr_debug("group desc not available\n");
2443 desc
= calloc(nr_groups
, sizeof(*desc
));
2447 for (i
= 0; i
< nr_groups
; i
++) {
2448 desc
[i
].name
= do_read_string(ff
);
2452 if (do_read_u32(ff
, &desc
[i
].leader_idx
))
2455 if (do_read_u32(ff
, &desc
[i
].nr_members
))
2460 * Rebuild group relationship based on the group_desc
2462 session
= container_of(ff
->ph
, struct perf_session
, header
);
2463 session
->evlist
->nr_groups
= nr_groups
;
2466 evlist__for_each_entry(session
->evlist
, evsel
) {
2467 if (evsel
->idx
== (int) desc
[i
].leader_idx
) {
2468 evsel
->leader
= evsel
;
2469 /* {anon_group} is a dummy name */
2470 if (strcmp(desc
[i
].name
, "{anon_group}")) {
2471 evsel
->group_name
= desc
[i
].name
;
2472 desc
[i
].name
= NULL
;
2474 evsel
->core
.nr_members
= desc
[i
].nr_members
;
2476 if (i
>= nr_groups
|| nr
> 0) {
2477 pr_debug("invalid group desc\n");
2482 nr
= evsel
->core
.nr_members
- 1;
2485 /* This is a group member */
2486 evsel
->leader
= leader
;
2492 if (i
!= nr_groups
|| nr
!= 0) {
2493 pr_debug("invalid group desc\n");
2499 for (i
= 0; i
< nr_groups
; i
++)
2500 zfree(&desc
[i
].name
);
static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

		#define _R(v)						\
			if (do_read_u32(ff, &c.v))			\
				goto out_free_caches;			\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
		#undef _R

		#define _R(v)					\
			c.v = do_read_string(ff);		\
			if (!c.v)				\
				goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
		#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}
static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}
static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

		#define _R(v)				\
			if (do_read_u64(ff, &n.v))	\
				goto out;		\

		_R(node)
		_R(size)
		#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize    = bsize;
	ff->ph->env.memory_nodes    = nodes;
	ff->ph->env.nr_memory_nodes = nr;
	ret = 0;

out:
	if (ret)
		free(nodes);
	return ret;
}
static int process_clockid(struct feat_fd *ff,
			   void *data __maybe_unused)
{
	if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
		return -1;

	return 0;
}

static int process_dir_format(struct feat_fd *ff,
			      void *_data __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_read_u64(ff, &data->dir.version);
}
#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting bpf_prog_info from systems with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		if (info_len > sizeof(struct bpf_prog_info)) {
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct btf_node *node = NULL;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting btf from systems with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 id, data_size;

		if (do_read_u32(ff, &id))
			goto out;
		if (do_read_u32(ff, &data_size))
			goto out;

		node = malloc(sizeof(struct btf_node) + data_size);
		if (!node)
			goto out;

		node->id = id;
		node->data_size = data_size;

		if (__do_read(ff, node->data, data_size))
			goto out;

		perf_env__insert_btf(env, node);
		node = NULL;
	}

	err = 0;
out:
	up_write(&env->bpf_progs.lock);
	free(node);
	return err;
}
static int process_compressed(struct feat_fd *ff,
			      void *data __maybe_unused)
{
	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
		return -1;

	return 0;
}
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL

// Only used in util/synthetic-events.c
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
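
/*
 * For reference, FEAT_OPR(HOSTNAME, hostname, false) expands to the
 * designated initializer:
 *
 *	[HEADER_HOSTNAME] = {
 *		.name	    = "HOSTNAME",
 *		.write	    = write_hostname,
 *		.print	    = print_hostname,
 *		.full_only  = false,
 *		.process    = process_hostname,
 *		.synthesize = true
 *	},
 */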
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO,	bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};
{
2873 bool full
; /* extended list of headers */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	stctime = st.st_mtime;
	fprintf(fp, "# captured on    : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd  = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}
int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->core.ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used instead. But given that hdr_sz can vary
 * based on the ABI revision, we need to try all known combinations of
 * size and byte order to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i, ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
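
/*
 * Example: an ABI0 file records attr_size as PERF_ATTR_SIZE_VER0 (64)
 * plus sizeof(struct perf_file_section) (16), i.e. 80. A file written
 * on an opposite-endian host presents bswap_64(80) instead, which only
 * matches after swapping, and needs_swap is set as a side effect.
 */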
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the host recording the samples and of the host parsing
 * them is the same. This is not always the case, given that the pipe
 * output may be redirected into a file and analyzed on a different
 * machine with a different endianness and different perf_event ABI
 * revisions in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != hdr_sz)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}
bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}
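
/*
 * Decision tree: a "PERFFILE" magic means a legacy (version 1) header,
 * whose endianness must be inferred from the header size via the
 * try_all_*_abis() helpers above. Otherwise the 64-bit magic itself
 * encodes endianness: matching PERF_MAGIC means native byte order,
 * matching its byte-swapped form means every header field needs
 * swapping, and anything else is not a perf.data file.
 */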
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset = header->data.offset;
	ph->data_size	= header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
					struct perf_header *ph, int fd,
					bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}
static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}
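
/*
 * perf_event_attr has grown over time, so only PERF_ATTR_SIZE_VER0
 * bytes are guaranteed to be present. The on-file attr->size says how
 * much more this particular file carries: older, smaller attrs are
 * left zero-padded, while files from a newer ABI than this tool
 * understands are rejected.
 */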
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;
	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;

		ptr += PERF_ATTR_SIZE_VER0;
		ret = readn(fd, ptr, left);
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}
static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->core.attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}
static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
						  struct tep_handle *pevent)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}
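
/*
 * Entry point for reading a perf.data header: pipe streams hand off to
 * perf_header__read_pipe(), while regular files get the full sequence:
 * validate the file header, build an evsel per on-file attr along with
 * its sample ids, then process the feature sections at feat_offset.
 */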
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much as possible.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
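
/*
 * In pipe mode the feature data arrives inline as
 * PERF_RECORD_HEADER_FEATURE events rather than as sections at the end
 * of the file, so the feat_fd is backed by the event payload buffer
 * instead of a file descriptor.
 */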
int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph   = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct perf_cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id:    %" PRI_lu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
	}

	return 0;
}
int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct evlist **pevlist)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct evlist *evlist;
	struct evsel *evsel;
	struct perf_cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->core.own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
	default:
		break;
	}

	return 0;
}
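
/*
 * Tracing data is u64-aligned on file: trace_report() consumes
 * size_read bytes and the remainder up to the recorded size is
 * padding. E.g. 8061 bytes of tracing data are followed by
 * PERF_ALIGN(8061, 8) - 8061 = 3 padding bytes.
 */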
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,