/* tools/perf/util/header.c */
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
27 /*
28  * magic2 = "PERFILE2"
29  * must be a numerical value to let the endianness
30  * determine the memory layout. That way we are able
31  * to detect endianness when reading the perf.data file
32  * back.
33  *
34  * we check for legacy (PERFFILE) format.
35  */
36 static const char *__perf_magic1 = "PERFFILE";
37 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
38 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
40 #define PERF_MAGIC __perf_magic2
42 struct perf_file_attr {
43 struct perf_event_attr attr;
44 struct perf_file_section ids;
47 void perf_header__set_feat(struct perf_header *header, int feat)
49 set_bit(feat, header->adds_features);
52 void perf_header__clear_feat(struct perf_header *header, int feat)
54 clear_bit(feat, header->adds_features);
57 bool perf_header__has_feat(const struct perf_header *header, int feat)
59 return test_bit(feat, header->adds_features);
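/*
 * do_write(): call write(2) in a loop until all 'size' bytes of 'buf' have
 * been written to 'fd'; short writes are retried, and the first write error
 * is returned as -errno.
 */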
62 static int do_write(int fd, const void *buf, size_t size)
64 while (size) {
65 int ret = write(fd, buf, size);
67 if (ret < 0)
68 return -errno;
70 size -= ret;
71 buf += ret;
74 return 0;
77 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
79 static const char zero_buf[NAME_ALIGN];
80 int err = do_write(fd, bf, count);
82 if (!err)
83 err = do_write(fd, zero_buf, count_aligned - count);
85 return err;
88 #define string_size(str) \
89 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
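/*
 * On-disk string layout used throughout the header: a u32 length (strlen + 1
 * rounded up to NAME_ALIGN) followed by the NUL-terminated string itself,
 * zero-padded out to that length.  do_read_string() below is the matching
 * reader and byte-swaps the length when needed.
 */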
91 static int do_write_string(int fd, const char *str)
93 u32 len, olen;
94 int ret;
96 olen = strlen(str) + 1;
97 len = PERF_ALIGN(olen, NAME_ALIGN);
99 /* write len, incl. \0 */
100 ret = do_write(fd, &len, sizeof(len));
101 if (ret < 0)
102 return ret;
104 return write_padded(fd, str, olen, len);
107 static char *do_read_string(int fd, struct perf_header *ph)
109 ssize_t sz, ret;
110 u32 len;
111 char *buf;
113 sz = readn(fd, &len, sizeof(len));
114 if (sz < (ssize_t)sizeof(len))
115 return NULL;
117 if (ph->needs_swap)
118 len = bswap_32(len);
120 buf = malloc(len);
121 if (!buf)
122 return NULL;
124 ret = readn(fd, buf, len);
125 if (ret == (ssize_t)len) {
126 /*
127  * strings are padded by zeroes
128  * thus the actual strlen of buf
129  * may be less than len
130  */
131 return buf;
134 free(buf);
135 return NULL;
138 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
139 struct perf_evlist *evlist)
141 return read_tracing_data(fd, &evlist->entries);
145 static int write_build_id(int fd, struct perf_header *h,
146 struct perf_evlist *evlist __maybe_unused)
148 struct perf_session *session;
149 int err;
151 session = container_of(h, struct perf_session, header);
153 if (!perf_session__read_build_ids(session, true))
154 return -1;
156 err = perf_session__write_buildid_table(session, fd);
157 if (err < 0) {
158 pr_debug("failed to write buildid table\n");
159 return err;
161 perf_session__cache_build_ids(session);
163 return 0;
166 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
167 struct perf_evlist *evlist __maybe_unused)
169 struct utsname uts;
170 int ret;
172 ret = uname(&uts);
173 if (ret < 0)
174 return -1;
176 return do_write_string(fd, uts.nodename);
179 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
180 struct perf_evlist *evlist __maybe_unused)
182 struct utsname uts;
183 int ret;
185 ret = uname(&uts);
186 if (ret < 0)
187 return -1;
189 return do_write_string(fd, uts.release);
192 static int write_arch(int fd, struct perf_header *h __maybe_unused,
193 struct perf_evlist *evlist __maybe_unused)
195 struct utsname uts;
196 int ret;
198 ret = uname(&uts);
199 if (ret < 0)
200 return -1;
202 return do_write_string(fd, uts.machine);
205 static int write_version(int fd, struct perf_header *h __maybe_unused,
206 struct perf_evlist *evlist __maybe_unused)
208 return do_write_string(fd, perf_version_string);
211 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
213 FILE *file;
214 char *buf = NULL;
215 char *s, *p;
216 const char *search = cpuinfo_proc;
217 size_t len = 0;
218 int ret = -1;
220 if (!search)
221 return -1;
223 file = fopen("/proc/cpuinfo", "r");
224 if (!file)
225 return -1;
227 while (getline(&buf, &len, file) > 0) {
228 ret = strncmp(buf, search, strlen(search));
229 if (!ret)
230 break;
233 if (ret) {
234 ret = -1;
235 goto done;
238 s = buf;
240 p = strchr(buf, ':');
241 if (p && *(p+1) == ' ' && *(p+2))
242 s = p + 2;
243 p = strchr(s, '\n');
244 if (p)
245 *p = '\0';
247 /* squash extra space characters (branding string) */
248 p = s;
249 while (*p) {
250 if (isspace(*p)) {
251 char *r = p + 1;
252 char *q = r;
253 *p = ' ';
254 while (*q && isspace(*q))
255 q++;
256 if (q != (p+1))
257 while ((*r++ = *q++));
259 p++;
261 ret = do_write_string(fd, s);
262 done:
263 free(buf);
264 fclose(file);
265 return ret;
268 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
269 struct perf_evlist *evlist __maybe_unused)
271 #ifndef CPUINFO_PROC
272 #define CPUINFO_PROC {"model name", }
273 #endif
274 const char *cpuinfo_procs[] = CPUINFO_PROC;
275 unsigned int i;
277 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
278 int ret;
279 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
280 if (ret >= 0)
281 return ret;
283 return -1;
287 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
288 struct perf_evlist *evlist __maybe_unused)
290 long nr;
291 u32 nrc, nra;
292 int ret;
294 nr = sysconf(_SC_NPROCESSORS_CONF);
295 if (nr < 0)
296 return -1;
298 nrc = (u32)(nr & UINT_MAX);
300 nr = sysconf(_SC_NPROCESSORS_ONLN);
301 if (nr < 0)
302 return -1;
304 nra = (u32)(nr & UINT_MAX);
306 ret = do_write(fd, &nrc, sizeof(nrc));
307 if (ret < 0)
308 return ret;
310 return do_write(fd, &nra, sizeof(nra));
313 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
314 struct perf_evlist *evlist)
316 struct perf_evsel *evsel;
317 u32 nre, nri, sz;
318 int ret;
320 nre = evlist->nr_entries;
322 /*
323  * write number of events
324  */
325 ret = do_write(fd, &nre, sizeof(nre));
326 if (ret < 0)
327 return ret;
329 /*
330  * size of perf_event_attr struct
331  */
332 sz = (u32)sizeof(evsel->attr);
333 ret = do_write(fd, &sz, sizeof(sz));
334 if (ret < 0)
335 return ret;
337 evlist__for_each(evlist, evsel) {
338 ret = do_write(fd, &evsel->attr, sz);
339 if (ret < 0)
340 return ret;
341 /*
342  * write number of unique ids per event
343  * there is one id per instance of an event
344  *
345  * copy into an nri to be independent of the
346  * type of ids,
347  */
348 nri = evsel->ids;
349 ret = do_write(fd, &nri, sizeof(nri));
350 if (ret < 0)
351 return ret;
353 /*
354  * write event string as passed on cmdline
355  */
356 ret = do_write_string(fd, perf_evsel__name(evsel));
357 if (ret < 0)
358 return ret;
359 /*
360  * write unique ids for this event
361  */
362 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
363 if (ret < 0)
364 return ret;
366 return 0;
369 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
370 struct perf_evlist *evlist __maybe_unused)
372 char buf[MAXPATHLEN];
373 char proc[32];
374 u32 n;
375 int i, ret;
377 /*
378  * actual path to perf binary
379  */
380 sprintf(proc, "/proc/%d/exe", getpid());
381 ret = readlink(proc, buf, sizeof(buf));
382 if (ret <= 0)
383 return -1;
385 /* readlink() does not add null termination */
386 buf[ret] = '\0';
388 /* account for binary path */
389 n = perf_env.nr_cmdline + 1;
391 ret = do_write(fd, &n, sizeof(n));
392 if (ret < 0)
393 return ret;
395 ret = do_write_string(fd, buf);
396 if (ret < 0)
397 return ret;
399 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
400 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
401 if (ret < 0)
402 return ret;
404 return 0;
407 #define CORE_SIB_FMT \
408 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
409 #define THRD_SIB_FMT \
410 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
412 struct cpu_topo {
413 u32 cpu_nr;
414 u32 core_sib;
415 u32 thread_sib;
416 char **core_siblings;
417 char **thread_siblings;
420 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
422 FILE *fp;
423 char filename[MAXPATHLEN];
424 char *buf = NULL, *p;
425 size_t len = 0;
426 ssize_t sret;
427 u32 i = 0;
428 int ret = -1;
430 sprintf(filename, CORE_SIB_FMT, cpu);
431 fp = fopen(filename, "r");
432 if (!fp)
433 goto try_threads;
435 sret = getline(&buf, &len, fp);
436 fclose(fp);
437 if (sret <= 0)
438 goto try_threads;
440 p = strchr(buf, '\n');
441 if (p)
442 *p = '\0';
444 for (i = 0; i < tp->core_sib; i++) {
445 if (!strcmp(buf, tp->core_siblings[i]))
446 break;
448 if (i == tp->core_sib) {
449 tp->core_siblings[i] = buf;
450 tp->core_sib++;
451 buf = NULL;
452 len = 0;
454 ret = 0;
456 try_threads:
457 sprintf(filename, THRD_SIB_FMT, cpu);
458 fp = fopen(filename, "r");
459 if (!fp)
460 goto done;
462 if (getline(&buf, &len, fp) <= 0)
463 goto done;
465 p = strchr(buf, '\n');
466 if (p)
467 *p = '\0';
469 for (i = 0; i < tp->thread_sib; i++) {
470 if (!strcmp(buf, tp->thread_siblings[i]))
471 break;
473 if (i == tp->thread_sib) {
474 tp->thread_siblings[i] = buf;
475 tp->thread_sib++;
476 buf = NULL;
478 ret = 0;
479 done:
480 if (fp)
481 fclose(fp);
482 free(buf);
483 return ret;
486 static void free_cpu_topo(struct cpu_topo *tp)
488 u32 i;
490 if (!tp)
491 return;
493 for (i = 0 ; i < tp->core_sib; i++)
494 zfree(&tp->core_siblings[i]);
496 for (i = 0 ; i < tp->thread_sib; i++)
497 zfree(&tp->thread_siblings[i]);
499 free(tp);
502 static struct cpu_topo *build_cpu_topology(void)
504 struct cpu_topo *tp;
505 void *addr;
506 u32 nr, i;
507 size_t sz;
508 long ncpus;
509 int ret = -1;
511 ncpus = sysconf(_SC_NPROCESSORS_CONF);
512 if (ncpus < 0)
513 return NULL;
515 nr = (u32)(ncpus & UINT_MAX);
517 sz = nr * sizeof(char *);
519 addr = calloc(1, sizeof(*tp) + 2 * sz);
520 if (!addr)
521 return NULL;
523 tp = addr;
524 tp->cpu_nr = nr;
525 addr += sizeof(*tp);
526 tp->core_siblings = addr;
527 addr += sz;
528 tp->thread_siblings = addr;
530 for (i = 0; i < nr; i++) {
531 ret = build_cpu_topo(tp, i);
532 if (ret < 0)
533 break;
535 if (ret) {
536 free_cpu_topo(tp);
537 tp = NULL;
539 return tp;
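/*
 * HEADER_CPU_TOPOLOGY section, as written below: a u32 count of unique
 * core-sibling strings followed by the strings, the same for thread
 * siblings, then a (core_id, socket_id) u32 pair for every available CPU.
 */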
542 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
543 struct perf_evlist *evlist __maybe_unused)
545 struct cpu_topo *tp;
546 u32 i;
547 int ret, j;
549 tp = build_cpu_topology();
550 if (!tp)
551 return -1;
553 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
554 if (ret < 0)
555 goto done;
557 for (i = 0; i < tp->core_sib; i++) {
558 ret = do_write_string(fd, tp->core_siblings[i]);
559 if (ret < 0)
560 goto done;
562 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
563 if (ret < 0)
564 goto done;
566 for (i = 0; i < tp->thread_sib; i++) {
567 ret = do_write_string(fd, tp->thread_siblings[i]);
568 if (ret < 0)
569 break;
572 ret = perf_env__read_cpu_topology_map(&perf_env);
573 if (ret < 0)
574 goto done;
576 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
577 ret = do_write(fd, &perf_env.cpu[j].core_id,
578 sizeof(perf_env.cpu[j].core_id));
579 if (ret < 0)
580 return ret;
581 ret = do_write(fd, &perf_env.cpu[j].socket_id,
582 sizeof(perf_env.cpu[j].socket_id));
583 if (ret < 0)
584 return ret;
586 done:
587 free_cpu_topo(tp);
588 return ret;
593 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
594 struct perf_evlist *evlist __maybe_unused)
596 char *buf = NULL;
597 FILE *fp;
598 size_t len = 0;
599 int ret = -1, n;
600 uint64_t mem;
602 fp = fopen("/proc/meminfo", "r");
603 if (!fp)
604 return -1;
606 while (getline(&buf, &len, fp) > 0) {
607 ret = strncmp(buf, "MemTotal:", 9);
608 if (!ret)
609 break;
611 if (!ret) {
612 n = sscanf(buf, "%*s %"PRIu64, &mem);
613 if (n == 1)
614 ret = do_write(fd, &mem, sizeof(mem));
615 } else
616 ret = -1;
617 free(buf);
618 fclose(fp);
619 return ret;
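/*
 * Write one NUMA node entry: MemTotal and MemFree (in kB, as u64) parsed
 * from /sys/devices/system/node/node%d/meminfo, followed by the node's
 * cpulist string.
 */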
622 static int write_topo_node(int fd, int node)
624 char str[MAXPATHLEN];
625 char field[32];
626 char *buf = NULL, *p;
627 size_t len = 0;
628 FILE *fp;
629 u64 mem_total, mem_free, mem;
630 int ret = -1;
632 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
633 fp = fopen(str, "r");
634 if (!fp)
635 return -1;
637 while (getline(&buf, &len, fp) > 0) {
638 /* skip over invalid lines */
639 if (!strchr(buf, ':'))
640 continue;
641 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
642 goto done;
643 if (!strcmp(field, "MemTotal:"))
644 mem_total = mem;
645 if (!strcmp(field, "MemFree:"))
646 mem_free = mem;
649 fclose(fp);
650 fp = NULL;
652 ret = do_write(fd, &mem_total, sizeof(u64));
653 if (ret)
654 goto done;
656 ret = do_write(fd, &mem_free, sizeof(u64));
657 if (ret)
658 goto done;
660 ret = -1;
661 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
663 fp = fopen(str, "r");
664 if (!fp)
665 goto done;
667 if (getline(&buf, &len, fp) <= 0)
668 goto done;
670 p = strchr(buf, '\n');
671 if (p)
672 *p = '\0';
674 ret = do_write_string(fd, buf);
675 done:
676 free(buf);
677 if (fp)
678 fclose(fp);
679 return ret;
682 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
683 struct perf_evlist *evlist __maybe_unused)
685 char *buf = NULL;
686 size_t len = 0;
687 FILE *fp;
688 struct cpu_map *node_map = NULL;
689 char *c;
690 u32 nr, i, j;
691 int ret = -1;
693 fp = fopen("/sys/devices/system/node/online", "r");
694 if (!fp)
695 return -1;
697 if (getline(&buf, &len, fp) <= 0)
698 goto done;
700 c = strchr(buf, '\n');
701 if (c)
702 *c = '\0';
704 node_map = cpu_map__new(buf);
705 if (!node_map)
706 goto done;
708 nr = (u32)node_map->nr;
710 ret = do_write(fd, &nr, sizeof(nr));
711 if (ret < 0)
712 goto done;
714 for (i = 0; i < nr; i++) {
715 j = (u32)node_map->map[i];
716 ret = do_write(fd, &j, sizeof(j));
717 if (ret < 0)
718 break;
720 ret = write_topo_node(fd, i);
721 if (ret < 0)
722 break;
724 done:
725 free(buf);
726 fclose(fp);
727 cpu_map__put(node_map);
728 return ret;
731 /*
732  * File format:
733  *
734  * struct pmu_mappings {
735  *	u32	pmu_num;
736  *	struct pmu_map {
737  *		u32	type;
738  *		char	name[];
739  *	}[pmu_num];
740  * };
741  */
743 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
744 struct perf_evlist *evlist __maybe_unused)
746 struct perf_pmu *pmu = NULL;
747 off_t offset = lseek(fd, 0, SEEK_CUR);
748 __u32 pmu_num = 0;
749 int ret;
751 /* write real pmu_num later */
752 ret = do_write(fd, &pmu_num, sizeof(pmu_num));
753 if (ret < 0)
754 return ret;
756 while ((pmu = perf_pmu__scan(pmu))) {
757 if (!pmu->name)
758 continue;
759 pmu_num++;
761 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
762 if (ret < 0)
763 return ret;
765 ret = do_write_string(fd, pmu->name);
766 if (ret < 0)
767 return ret;
770 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
771 /* discard all */
772 lseek(fd, offset, SEEK_SET);
773 return -1;
776 return 0;
779 /*
780  * File format:
781  *
782  * struct group_descs {
783  *	u32	nr_groups;
784  *	struct group_desc {
785  *		char	name[];
786  *		u32	leader_idx;
787  *		u32	nr_members;
788  *	}[nr_groups];
789  * };
790  */
791 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
792 struct perf_evlist *evlist)
794 u32 nr_groups = evlist->nr_groups;
795 struct perf_evsel *evsel;
796 int ret;
798 ret = do_write(fd, &nr_groups, sizeof(nr_groups));
799 if (ret < 0)
800 return ret;
802 evlist__for_each(evlist, evsel) {
803 if (perf_evsel__is_group_leader(evsel) &&
804 evsel->nr_members > 1) {
805 const char *name = evsel->group_name ?: "{anon_group}";
806 u32 leader_idx = evsel->idx;
807 u32 nr_members = evsel->nr_members;
809 ret = do_write_string(fd, name);
810 if (ret < 0)
811 return ret;
813 ret = do_write(fd, &leader_idx, sizeof(leader_idx));
814 if (ret < 0)
815 return ret;
817 ret = do_write(fd, &nr_members, sizeof(nr_members));
818 if (ret < 0)
819 return ret;
822 return 0;
825 /*
826  * default get_cpuid(): nothing gets recorded
827  * actual implementation must be in arch/$(ARCH)/util/header.c
828  */
829 int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
830 size_t sz __maybe_unused)
832 return -1;
835 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
836 struct perf_evlist *evlist __maybe_unused)
838 char buffer[64];
839 int ret;
841 ret = get_cpuid(buffer, sizeof(buffer));
842 if (!ret)
843 goto write_it;
845 return -1;
846 write_it:
847 return do_write_string(fd, buffer);
850 static int write_branch_stack(int fd __maybe_unused,
851 struct perf_header *h __maybe_unused,
852 struct perf_evlist *evlist __maybe_unused)
854 return 0;
857 static int write_auxtrace(int fd, struct perf_header *h,
858 struct perf_evlist *evlist __maybe_unused)
860 struct perf_session *session;
861 int err;
863 session = container_of(h, struct perf_session, header);
865 err = auxtrace_index__write(fd, &session->auxtrace_index);
866 if (err < 0)
867 pr_err("Failed to write auxtrace index\n");
868 return err;
871 static int write_stat(int fd __maybe_unused,
872 struct perf_header *h __maybe_unused,
873 struct perf_evlist *evlist __maybe_unused)
875 return 0;
878 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
879 FILE *fp)
881 fprintf(fp, "# hostname : %s\n", ph->env.hostname);
884 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
885 FILE *fp)
887 fprintf(fp, "# os release : %s\n", ph->env.os_release);
890 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
892 fprintf(fp, "# arch : %s\n", ph->env.arch);
895 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
896 FILE *fp)
898 fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
901 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
902 FILE *fp)
904 fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
905 fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
908 static void print_version(struct perf_header *ph, int fd __maybe_unused,
909 FILE *fp)
911 fprintf(fp, "# perf version : %s\n", ph->env.version);
914 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
915 FILE *fp)
917 int nr, i;
919 nr = ph->env.nr_cmdline;
921 fprintf(fp, "# cmdline : ");
923 for (i = 0; i < nr; i++)
924 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
925 fputc('\n', fp);
928 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
929 FILE *fp)
931 int nr, i;
932 char *str;
933 int cpu_nr = ph->env.nr_cpus_online;
935 nr = ph->env.nr_sibling_cores;
936 str = ph->env.sibling_cores;
938 for (i = 0; i < nr; i++) {
939 fprintf(fp, "# sibling cores : %s\n", str);
940 str += strlen(str) + 1;
943 nr = ph->env.nr_sibling_threads;
944 str = ph->env.sibling_threads;
946 for (i = 0; i < nr; i++) {
947 fprintf(fp, "# sibling threads : %s\n", str);
948 str += strlen(str) + 1;
951 if (ph->env.cpu != NULL) {
952 for (i = 0; i < cpu_nr; i++)
953 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
954 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
955 } else
956 fprintf(fp, "# Core ID and Socket ID information is not available\n");
959 static void free_event_desc(struct perf_evsel *events)
961 struct perf_evsel *evsel;
963 if (!events)
964 return;
966 for (evsel = events; evsel->attr.size; evsel++) {
967 zfree(&evsel->name);
968 zfree(&evsel->id);
971 free(events);
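/*
 * Read back the HEADER_EVENT_DESC section written by write_event_desc():
 * the number of events and the on-file attr size, then for each event the
 * perf_event_attr, the number of ids, the event name and the id array.
 * Returns a calloc'ed array terminated by an entry with attr.size == 0,
 * or NULL on error.
 */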
974 static struct perf_evsel *
975 read_event_desc(struct perf_header *ph, int fd)
977 struct perf_evsel *evsel, *events = NULL;
978 u64 *id;
979 void *buf = NULL;
980 u32 nre, sz, nr, i, j;
981 ssize_t ret;
982 size_t msz;
984 /* number of events */
985 ret = readn(fd, &nre, sizeof(nre));
986 if (ret != (ssize_t)sizeof(nre))
987 goto error;
989 if (ph->needs_swap)
990 nre = bswap_32(nre);
992 ret = readn(fd, &sz, sizeof(sz));
993 if (ret != (ssize_t)sizeof(sz))
994 goto error;
996 if (ph->needs_swap)
997 sz = bswap_32(sz);
999 /* buffer to hold on file attr struct */
1000 buf = malloc(sz);
1001 if (!buf)
1002 goto error;
1004 /* the last event terminates with evsel->attr.size == 0: */
1005 events = calloc(nre + 1, sizeof(*events));
1006 if (!events)
1007 goto error;
1009 msz = sizeof(evsel->attr);
1010 if (sz < msz)
1011 msz = sz;
1013 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1014 evsel->idx = i;
1016 /*
1017  * must read entire on-file attr struct to
1018  * sync up with layout.
1019  */
1020 ret = readn(fd, buf, sz);
1021 if (ret != (ssize_t)sz)
1022 goto error;
1024 if (ph->needs_swap)
1025 perf_event__attr_swap(buf);
1027 memcpy(&evsel->attr, buf, msz);
1029 ret = readn(fd, &nr, sizeof(nr));
1030 if (ret != (ssize_t)sizeof(nr))
1031 goto error;
1033 if (ph->needs_swap) {
1034 nr = bswap_32(nr);
1035 evsel->needs_swap = true;
1038 evsel->name = do_read_string(fd, ph);
1040 if (!nr)
1041 continue;
1043 id = calloc(nr, sizeof(*id));
1044 if (!id)
1045 goto error;
1046 evsel->ids = nr;
1047 evsel->id = id;
1049 for (j = 0 ; j < nr; j++) {
1050 ret = readn(fd, id, sizeof(*id));
1051 if (ret != (ssize_t)sizeof(*id))
1052 goto error;
1053 if (ph->needs_swap)
1054 *id = bswap_64(*id);
1055 id++;
1058 out:
1059 free(buf);
1060 return events;
1061 error:
1062 free_event_desc(events);
1063 events = NULL;
1064 goto out;
1067 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1068 void *priv __attribute__((unused)))
1070 return fprintf(fp, ", %s = %s", name, val);
1073 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1075 struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1076 u32 j;
1077 u64 *id;
1079 if (!events) {
1080 fprintf(fp, "# event desc: not available or unable to read\n");
1081 return;
1084 for (evsel = events; evsel->attr.size; evsel++) {
1085 fprintf(fp, "# event : name = %s, ", evsel->name);
1087 if (evsel->ids) {
1088 fprintf(fp, ", id = {");
1089 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1090 if (j)
1091 fputc(',', fp);
1092 fprintf(fp, " %"PRIu64, *id);
1094 fprintf(fp, " }");
1097 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1099 fputc('\n', fp);
1102 free_event_desc(events);
1105 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1106 FILE *fp)
1108 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1111 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1112 FILE *fp)
1114 u32 nr, c, i;
1115 char *str, *tmp;
1116 uint64_t mem_total, mem_free;
1118 /* nr nodes */
1119 nr = ph->env.nr_numa_nodes;
1120 str = ph->env.numa_nodes;
1122 for (i = 0; i < nr; i++) {
1123 /* node number */
1124 c = strtoul(str, &tmp, 0);
1125 if (*tmp != ':')
1126 goto error;
1128 str = tmp + 1;
1129 mem_total = strtoull(str, &tmp, 0);
1130 if (*tmp != ':')
1131 goto error;
1133 str = tmp + 1;
1134 mem_free = strtoull(str, &tmp, 0);
1135 if (*tmp != ':')
1136 goto error;
1138 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1139 " free = %"PRIu64" kB\n",
1140 c, mem_total, mem_free);
1142 str = tmp + 1;
1143 fprintf(fp, "# node%u cpu list : %s\n", c, str);
1145 str += strlen(str) + 1;
1147 return;
1148 error:
1149 fprintf(fp, "# numa topology : not available\n");
1152 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1154 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1157 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1158 int fd __maybe_unused, FILE *fp)
1160 fprintf(fp, "# contains samples with branch stack\n");
1163 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1164 int fd __maybe_unused, FILE *fp)
1166 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1169 static void print_stat(struct perf_header *ph __maybe_unused,
1170 int fd __maybe_unused, FILE *fp)
1172 fprintf(fp, "# contains stat data\n");
1175 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1176 FILE *fp)
1178 const char *delimiter = "# pmu mappings: ";
1179 char *str, *tmp;
1180 u32 pmu_num;
1181 u32 type;
1183 pmu_num = ph->env.nr_pmu_mappings;
1184 if (!pmu_num) {
1185 fprintf(fp, "# pmu mappings: not available\n");
1186 return;
1189 str = ph->env.pmu_mappings;
1191 while (pmu_num) {
1192 type = strtoul(str, &tmp, 0);
1193 if (*tmp != ':')
1194 goto error;
1196 str = tmp + 1;
1197 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1199 delimiter = ", ";
1200 str += strlen(str) + 1;
1201 pmu_num--;
1204 fprintf(fp, "\n");
1206 if (!pmu_num)
1207 return;
1208 error:
1209 fprintf(fp, "# pmu mappings: unable to read\n");
1212 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1213 FILE *fp)
1215 struct perf_session *session;
1216 struct perf_evsel *evsel;
1217 u32 nr = 0;
1219 session = container_of(ph, struct perf_session, header);
1221 evlist__for_each(session->evlist, evsel) {
1222 if (perf_evsel__is_group_leader(evsel) &&
1223 evsel->nr_members > 1) {
1224 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1225 perf_evsel__name(evsel));
1227 nr = evsel->nr_members - 1;
1228 } else if (nr) {
1229 fprintf(fp, ",%s", perf_evsel__name(evsel));
1231 if (--nr == 0)
1232 fprintf(fp, "}\n");
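/*
 * Attach a build id from the file's build-id table to its dso: look up (or
 * create) the machine for bev->pid, classify the dso from the record's
 * cpumode, then find or create the dso for 'filename' and set its build id.
 */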
1237 static int __event_process_build_id(struct build_id_event *bev,
1238 char *filename,
1239 struct perf_session *session)
1241 int err = -1;
1242 struct machine *machine;
1243 u16 cpumode;
1244 struct dso *dso;
1245 enum dso_kernel_type dso_type;
1247 machine = perf_session__findnew_machine(session, bev->pid);
1248 if (!machine)
1249 goto out;
1251 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1253 switch (cpumode) {
1254 case PERF_RECORD_MISC_KERNEL:
1255 dso_type = DSO_TYPE_KERNEL;
1256 break;
1257 case PERF_RECORD_MISC_GUEST_KERNEL:
1258 dso_type = DSO_TYPE_GUEST_KERNEL;
1259 break;
1260 case PERF_RECORD_MISC_USER:
1261 case PERF_RECORD_MISC_GUEST_USER:
1262 dso_type = DSO_TYPE_USER;
1263 break;
1264 default:
1265 goto out;
1268 dso = machine__findnew_dso(machine, filename);
1269 if (dso != NULL) {
1270 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1272 dso__set_build_id(dso, &bev->build_id);
1274 if (!is_kernel_module(filename, cpumode))
1275 dso->kernel = dso_type;
1277 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1278 sbuild_id);
1279 pr_debug("build id event received for %s: %s\n",
1280 dso->long_name, sbuild_id);
1281 dso__put(dso);
1284 err = 0;
1285 out:
1286 return err;
1289 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1290 int input, u64 offset, u64 size)
1292 struct perf_session *session = container_of(header, struct perf_session, header);
1293 struct {
1294 struct perf_event_header header;
1295 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1296 char filename[0];
1297 } old_bev;
1298 struct build_id_event bev;
1299 char filename[PATH_MAX];
1300 u64 limit = offset + size;
1302 while (offset < limit) {
1303 ssize_t len;
1305 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1306 return -1;
1308 if (header->needs_swap)
1309 perf_event_header__bswap(&old_bev.header);
1311 len = old_bev.header.size - sizeof(old_bev);
1312 if (readn(input, filename, len) != len)
1313 return -1;
1315 bev.header = old_bev.header;
1317 /*
1318  * As the pid is the missing value, we need to fill
1319  * it properly. The header.misc value gives us a nice hint.
1320  */
1321 bev.pid = HOST_KERNEL_ID;
1322 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1323 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1324 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1326 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1327 __event_process_build_id(&bev, filename, session);
1329 offset += bev.header.size;
1332 return 0;
1335 static int perf_header__read_build_ids(struct perf_header *header,
1336 int input, u64 offset, u64 size)
1338 struct perf_session *session = container_of(header, struct perf_session, header);
1339 struct build_id_event bev;
1340 char filename[PATH_MAX];
1341 u64 limit = offset + size, orig_offset = offset;
1342 int err = -1;
1344 while (offset < limit) {
1345 ssize_t len;
1347 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1348 goto out;
1350 if (header->needs_swap)
1351 perf_event_header__bswap(&bev.header);
1353 len = bev.header.size - sizeof(bev);
1354 if (readn(input, filename, len) != len)
1355 goto out;
1356 /*
1357  * The a1645ce1 changeset:
1358  *
1359  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1360  *
1361  * Added a field to struct build_id_event that broke the file
1362  * format.
1363  *
1364  * Since the kernel build-id is the first entry, process the
1365  * table using the old format if the well known
1366  * '[kernel.kallsyms]' string for the kernel build-id has the
1367  * first 4 characters chopped off (where the pid_t sits).
1368  */
1369 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1370 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1371 return -1;
1372 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1375 __event_process_build_id(&bev, filename, session);
1377 offset += bev.header.size;
1379 err = 0;
1380 out:
1381 return err;
1384 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1385 struct perf_header *ph __maybe_unused,
1386 int fd, void *data)
1388 ssize_t ret = trace_report(fd, data, false);
1389 return ret < 0 ? -1 : 0;
1392 static int process_build_id(struct perf_file_section *section,
1393 struct perf_header *ph, int fd,
1394 void *data __maybe_unused)
1396 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1397 pr_debug("Failed to read buildids, continuing...\n");
1398 return 0;
1401 static int process_hostname(struct perf_file_section *section __maybe_unused,
1402 struct perf_header *ph, int fd,
1403 void *data __maybe_unused)
1405 ph->env.hostname = do_read_string(fd, ph);
1406 return ph->env.hostname ? 0 : -ENOMEM;
1409 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1410 struct perf_header *ph, int fd,
1411 void *data __maybe_unused)
1413 ph->env.os_release = do_read_string(fd, ph);
1414 return ph->env.os_release ? 0 : -ENOMEM;
1417 static int process_version(struct perf_file_section *section __maybe_unused,
1418 struct perf_header *ph, int fd,
1419 void *data __maybe_unused)
1421 ph->env.version = do_read_string(fd, ph);
1422 return ph->env.version ? 0 : -ENOMEM;
1425 static int process_arch(struct perf_file_section *section __maybe_unused,
1426 struct perf_header *ph, int fd,
1427 void *data __maybe_unused)
1429 ph->env.arch = do_read_string(fd, ph);
1430 return ph->env.arch ? 0 : -ENOMEM;
1433 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1434 struct perf_header *ph, int fd,
1435 void *data __maybe_unused)
1437 ssize_t ret;
1438 u32 nr;
1440 ret = readn(fd, &nr, sizeof(nr));
1441 if (ret != sizeof(nr))
1442 return -1;
1444 if (ph->needs_swap)
1445 nr = bswap_32(nr);
1447 ph->env.nr_cpus_avail = nr;
1449 ret = readn(fd, &nr, sizeof(nr));
1450 if (ret != sizeof(nr))
1451 return -1;
1453 if (ph->needs_swap)
1454 nr = bswap_32(nr);
1456 ph->env.nr_cpus_online = nr;
1457 return 0;
1460 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1461 struct perf_header *ph, int fd,
1462 void *data __maybe_unused)
1464 ph->env.cpu_desc = do_read_string(fd, ph);
1465 return ph->env.cpu_desc ? 0 : -ENOMEM;
1468 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1469 struct perf_header *ph, int fd,
1470 void *data __maybe_unused)
1472 ph->env.cpuid = do_read_string(fd, ph);
1473 return ph->env.cpuid ? 0 : -ENOMEM;
1476 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1477 struct perf_header *ph, int fd,
1478 void *data __maybe_unused)
1480 uint64_t mem;
1481 ssize_t ret;
1483 ret = readn(fd, &mem, sizeof(mem));
1484 if (ret != sizeof(mem))
1485 return -1;
1487 if (ph->needs_swap)
1488 mem = bswap_64(mem);
1490 ph->env.total_mem = mem;
1491 return 0;
1494 static struct perf_evsel *
1495 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1497 struct perf_evsel *evsel;
1499 evlist__for_each(evlist, evsel) {
1500 if (evsel->idx == idx)
1501 return evsel;
1504 return NULL;
1507 static void
1508 perf_evlist__set_event_name(struct perf_evlist *evlist,
1509 struct perf_evsel *event)
1511 struct perf_evsel *evsel;
1513 if (!event->name)
1514 return;
1516 evsel = perf_evlist__find_by_index(evlist, event->idx);
1517 if (!evsel)
1518 return;
1520 if (evsel->name)
1521 return;
1523 evsel->name = strdup(event->name);
1526 static int
1527 process_event_desc(struct perf_file_section *section __maybe_unused,
1528 struct perf_header *header, int fd,
1529 void *data __maybe_unused)
1531 struct perf_session *session;
1532 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1534 if (!events)
1535 return 0;
1537 session = container_of(header, struct perf_session, header);
1538 for (evsel = events; evsel->attr.size; evsel++)
1539 perf_evlist__set_event_name(session->evlist, evsel);
1541 free_event_desc(events);
1543 return 0;
1546 static int process_cmdline(struct perf_file_section *section,
1547 struct perf_header *ph, int fd,
1548 void *data __maybe_unused)
1550 ssize_t ret;
1551 char *str, *cmdline = NULL, **argv = NULL;
1552 u32 nr, i, len = 0;
1554 ret = readn(fd, &nr, sizeof(nr));
1555 if (ret != sizeof(nr))
1556 return -1;
1558 if (ph->needs_swap)
1559 nr = bswap_32(nr);
1561 ph->env.nr_cmdline = nr;
1563 cmdline = zalloc(section->size + nr + 1);
1564 if (!cmdline)
1565 return -1;
1567 argv = zalloc(sizeof(char *) * (nr + 1));
1568 if (!argv)
1569 goto error;
1571 for (i = 0; i < nr; i++) {
1572 str = do_read_string(fd, ph);
1573 if (!str)
1574 goto error;
1576 argv[i] = cmdline + len;
1577 memcpy(argv[i], str, strlen(str) + 1);
1578 len += strlen(str) + 1;
1579 free(str);
1581 ph->env.cmdline = cmdline;
1582 ph->env.cmdline_argv = (const char **) argv;
1583 return 0;
1585 error:
1586 free(argv);
1587 free(cmdline);
1588 return -1;
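/*
 * Read HEADER_CPU_TOPOLOGY: the core-sibling and thread-sibling string
 * lists, and, when the section is large enough (newer perf), a core_id and
 * socket_id per online CPU, each validated against the CPU count.
 */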
1591 static int process_cpu_topology(struct perf_file_section *section,
1592 struct perf_header *ph, int fd,
1593 void *data __maybe_unused)
1595 ssize_t ret;
1596 u32 nr, i;
1597 char *str;
1598 struct strbuf sb;
1599 int cpu_nr = ph->env.nr_cpus_online;
1600 u64 size = 0;
1602 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1603 if (!ph->env.cpu)
1604 return -1;
1606 ret = readn(fd, &nr, sizeof(nr));
1607 if (ret != sizeof(nr))
1608 goto free_cpu;
1610 if (ph->needs_swap)
1611 nr = bswap_32(nr);
1613 ph->env.nr_sibling_cores = nr;
1614 size += sizeof(u32);
1615 strbuf_init(&sb, 128);
1617 for (i = 0; i < nr; i++) {
1618 str = do_read_string(fd, ph);
1619 if (!str)
1620 goto error;
1622 /* include a NULL character at the end */
1623 strbuf_add(&sb, str, strlen(str) + 1);
1624 size += string_size(str);
1625 free(str);
1627 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1629 ret = readn(fd, &nr, sizeof(nr));
1630 if (ret != sizeof(nr))
1631 return -1;
1633 if (ph->needs_swap)
1634 nr = bswap_32(nr);
1636 ph->env.nr_sibling_threads = nr;
1637 size += sizeof(u32);
1639 for (i = 0; i < nr; i++) {
1640 str = do_read_string(fd, ph);
1641 if (!str)
1642 goto error;
1644 /* include a NULL character at the end */
1645 strbuf_add(&sb, str, strlen(str) + 1);
1646 size += string_size(str);
1647 free(str);
1649 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1651 /*
1652  * The header may be from old perf,
1653  * which doesn't include core id and socket id information.
1654  */
1655 if (section->size <= size) {
1656 zfree(&ph->env.cpu);
1657 return 0;
1660 for (i = 0; i < (u32)cpu_nr; i++) {
1661 ret = readn(fd, &nr, sizeof(nr));
1662 if (ret != sizeof(nr))
1663 goto free_cpu;
1665 if (ph->needs_swap)
1666 nr = bswap_32(nr);
1668 if (nr > (u32)cpu_nr) {
1669 pr_debug("core_id number is too big."
1670 "You may need to upgrade the perf tool.\n");
1671 goto free_cpu;
1673 ph->env.cpu[i].core_id = nr;
1675 ret = readn(fd, &nr, sizeof(nr));
1676 if (ret != sizeof(nr))
1677 goto free_cpu;
1679 if (ph->needs_swap)
1680 nr = bswap_32(nr);
1682 if (nr > (u32)cpu_nr) {
1683 pr_debug("socket_id number is too big."
1684 "You may need to upgrade the perf tool.\n");
1685 goto free_cpu;
1688 ph->env.cpu[i].socket_id = nr;
1691 return 0;
1693 error:
1694 strbuf_release(&sb);
1695 free_cpu:
1696 zfree(&ph->env.cpu);
1697 return -1;
1700 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1701 struct perf_header *ph, int fd,
1702 void *data __maybe_unused)
1704 ssize_t ret;
1705 u32 nr, node, i;
1706 char *str;
1707 uint64_t mem_total, mem_free;
1708 struct strbuf sb;
1710 /* nr nodes */
1711 ret = readn(fd, &nr, sizeof(nr));
1712 if (ret != sizeof(nr))
1713 goto error;
1715 if (ph->needs_swap)
1716 nr = bswap_32(nr);
1718 ph->env.nr_numa_nodes = nr;
1719 strbuf_init(&sb, 256);
1721 for (i = 0; i < nr; i++) {
1722 /* node number */
1723 ret = readn(fd, &node, sizeof(node));
1724 if (ret != sizeof(node))
1725 goto error;
1727 ret = readn(fd, &mem_total, sizeof(u64));
1728 if (ret != sizeof(u64))
1729 goto error;
1731 ret = readn(fd, &mem_free, sizeof(u64));
1732 if (ret != sizeof(u64))
1733 goto error;
1735 if (ph->needs_swap) {
1736 node = bswap_32(node);
1737 mem_total = bswap_64(mem_total);
1738 mem_free = bswap_64(mem_free);
1741 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1742 node, mem_total, mem_free);
1744 str = do_read_string(fd, ph);
1745 if (!str)
1746 goto error;
1748 /* include a NULL character at the end */
1749 strbuf_add(&sb, str, strlen(str) + 1);
1750 free(str);
1752 ph->env.numa_nodes = strbuf_detach(&sb, NULL);
1753 return 0;
1755 error:
1756 strbuf_release(&sb);
1757 return -1;
1760 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1761 struct perf_header *ph, int fd,
1762 void *data __maybe_unused)
1764 ssize_t ret;
1765 char *name;
1766 u32 pmu_num;
1767 u32 type;
1768 struct strbuf sb;
1770 ret = readn(fd, &pmu_num, sizeof(pmu_num));
1771 if (ret != sizeof(pmu_num))
1772 return -1;
1774 if (ph->needs_swap)
1775 pmu_num = bswap_32(pmu_num);
1777 if (!pmu_num) {
1778 pr_debug("pmu mappings not available\n");
1779 return 0;
1782 ph->env.nr_pmu_mappings = pmu_num;
1783 strbuf_init(&sb, 128);
1785 while (pmu_num) {
1786 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1787 goto error;
1788 if (ph->needs_swap)
1789 type = bswap_32(type);
1791 name = do_read_string(fd, ph);
1792 if (!name)
1793 goto error;
1795 strbuf_addf(&sb, "%u:%s", type, name);
1796 /* include a NULL character at the end */
1797 strbuf_add(&sb, "", 1);
1799 if (!strcmp(name, "msr"))
1800 ph->env.msr_pmu_type = type;
1802 free(name);
1803 pmu_num--;
1805 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1806 return 0;
1808 error:
1809 strbuf_release(&sb);
1810 return -1;
1813 static int process_group_desc(struct perf_file_section *section __maybe_unused,
1814 struct perf_header *ph, int fd,
1815 void *data __maybe_unused)
1817 int ret = -1;
1818 u32 i, nr, nr_groups;
1819 struct perf_session *session;
1820 struct perf_evsel *evsel, *leader = NULL;
1821 struct group_desc {
1822 char *name;
1823 u32 leader_idx;
1824 u32 nr_members;
1825 } *desc;
1827 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
1828 return -1;
1830 if (ph->needs_swap)
1831 nr_groups = bswap_32(nr_groups);
1833 ph->env.nr_groups = nr_groups;
1834 if (!nr_groups) {
1835 pr_debug("group desc not available\n");
1836 return 0;
1839 desc = calloc(nr_groups, sizeof(*desc));
1840 if (!desc)
1841 return -1;
1843 for (i = 0; i < nr_groups; i++) {
1844 desc[i].name = do_read_string(fd, ph);
1845 if (!desc[i].name)
1846 goto out_free;
1848 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
1849 goto out_free;
1851 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
1852 goto out_free;
1854 if (ph->needs_swap) {
1855 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
1856 desc[i].nr_members = bswap_32(desc[i].nr_members);
1860 /*
1861  * Rebuild group relationship based on the group_desc
1862  */
1863 session = container_of(ph, struct perf_session, header);
1864 session->evlist->nr_groups = nr_groups;
1866 i = nr = 0;
1867 evlist__for_each(session->evlist, evsel) {
1868 if (evsel->idx == (int) desc[i].leader_idx) {
1869 evsel->leader = evsel;
1870 /* {anon_group} is a dummy name */
1871 if (strcmp(desc[i].name, "{anon_group}")) {
1872 evsel->group_name = desc[i].name;
1873 desc[i].name = NULL;
1875 evsel->nr_members = desc[i].nr_members;
1877 if (i >= nr_groups || nr > 0) {
1878 pr_debug("invalid group desc\n");
1879 goto out_free;
1882 leader = evsel;
1883 nr = evsel->nr_members - 1;
1884 i++;
1885 } else if (nr) {
1886 /* This is a group member */
1887 evsel->leader = leader;
1889 nr--;
1893 if (i != nr_groups || nr != 0) {
1894 pr_debug("invalid group desc\n");
1895 goto out_free;
1898 ret = 0;
1899 out_free:
1900 for (i = 0; i < nr_groups; i++)
1901 zfree(&desc[i].name);
1902 free(desc);
1904 return ret;
1907 static int process_auxtrace(struct perf_file_section *section,
1908 struct perf_header *ph, int fd,
1909 void *data __maybe_unused)
1911 struct perf_session *session;
1912 int err;
1914 session = container_of(ph, struct perf_session, header);
1916 err = auxtrace_index__process(fd, section->size, session,
1917 ph->needs_swap);
1918 if (err < 0)
1919 pr_err("Failed to process auxtrace index\n");
1920 return err;
1923 struct feature_ops {
1924 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
1925 void (*print)(struct perf_header *h, int fd, FILE *fp);
1926 int (*process)(struct perf_file_section *section,
1927 struct perf_header *h, int fd, void *data);
1928 const char *name;
1929 bool full_only;
1932 #define FEAT_OPA(n, func) \
1933 [n] = { .name = #n, .write = write_##func, .print = print_##func }
1934 #define FEAT_OPP(n, func) \
1935 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1936 .process = process_##func }
1937 #define FEAT_OPF(n, func) \
1938 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
1939 .process = process_##func, .full_only = true }
1941 /* feature_ops not implemented: */
1942 #define print_tracing_data NULL
1943 #define print_build_id NULL
1945 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
1946 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
1947 FEAT_OPP(HEADER_BUILD_ID, build_id),
1948 FEAT_OPP(HEADER_HOSTNAME, hostname),
1949 FEAT_OPP(HEADER_OSRELEASE, osrelease),
1950 FEAT_OPP(HEADER_VERSION, version),
1951 FEAT_OPP(HEADER_ARCH, arch),
1952 FEAT_OPP(HEADER_NRCPUS, nrcpus),
1953 FEAT_OPP(HEADER_CPUDESC, cpudesc),
1954 FEAT_OPP(HEADER_CPUID, cpuid),
1955 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
1956 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
1957 FEAT_OPP(HEADER_CMDLINE, cmdline),
1958 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
1959 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
1960 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
1961 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
1962 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
1963 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
1964 FEAT_OPA(HEADER_STAT, stat),
1967 struct header_print_data {
1968 FILE *fp;
1969 bool full; /* extended list of headers */
1972 static int perf_file_section__fprintf_info(struct perf_file_section *section,
1973 struct perf_header *ph,
1974 int feat, int fd, void *data)
1976 struct header_print_data *hd = data;
1978 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
1979 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1980 "%d, continuing...\n", section->offset, feat);
1981 return 0;
1983 if (feat >= HEADER_LAST_FEATURE) {
1984 pr_warning("unknown feature %d\n", feat);
1985 return 0;
1987 if (!feat_ops[feat].print)
1988 return 0;
1990 if (!feat_ops[feat].full_only || hd->full)
1991 feat_ops[feat].print(ph, fd, hd->fp);
1992 else
1993 fprintf(hd->fp, "# %s info available, use -I to display\n",
1994 feat_ops[feat].name);
1996 return 0;
1999 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2001 struct header_print_data hd;
2002 struct perf_header *header = &session->header;
2003 int fd = perf_data_file__fd(session->file);
2004 hd.fp = fp;
2005 hd.full = full;
2007 perf_header__process_sections(header, fd, &hd,
2008 perf_file_section__fprintf_info);
2009 return 0;
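/*
 * Write a single optional feature section: remember the current file offset,
 * run the feature's ->write() callback, and on success record the section's
 * offset and size in *p.  On failure the file is rewound so the caller can
 * clear the feature bit.
 */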
2012 static int do_write_feat(int fd, struct perf_header *h, int type,
2013 struct perf_file_section **p,
2014 struct perf_evlist *evlist)
2016 int err;
2017 int ret = 0;
2019 if (perf_header__has_feat(h, type)) {
2020 if (!feat_ops[type].write)
2021 return -1;
2023 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2025 err = feat_ops[type].write(fd, h, evlist);
2026 if (err < 0) {
2027 pr_debug("failed to write feature %d\n", type);
2029 /* undo anything written */
2030 lseek(fd, (*p)->offset, SEEK_SET);
2032 return -1;
2034 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2035 (*p)++;
2037 return ret;
2040 static int perf_header__adds_write(struct perf_header *header,
2041 struct perf_evlist *evlist, int fd)
2043 int nr_sections;
2044 struct perf_file_section *feat_sec, *p;
2045 int sec_size;
2046 u64 sec_start;
2047 int feat;
2048 int err;
2050 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2051 if (!nr_sections)
2052 return 0;
2054 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2055 if (feat_sec == NULL)
2056 return -ENOMEM;
2058 sec_size = sizeof(*feat_sec) * nr_sections;
2060 sec_start = header->feat_offset;
2061 lseek(fd, sec_start + sec_size, SEEK_SET);
2063 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2064 if (do_write_feat(fd, header, feat, &p, evlist))
2065 perf_header__clear_feat(header, feat);
2068 lseek(fd, sec_start, SEEK_SET);
2069 /*
2070  * may write more than needed due to dropped feature, but
2071  * this is okay, the reader will skip the missing entries
2072  */
2073 err = do_write(fd, feat_sec, sec_size);
2074 if (err < 0)
2075 pr_debug("failed to write feature section\n");
2076 free(feat_sec);
2077 return err;
2080 int perf_header__write_pipe(int fd)
2082 struct perf_pipe_file_header f_header;
2083 int err;
2085 f_header = (struct perf_pipe_file_header){
2086 .magic = PERF_MAGIC,
2087 .size = sizeof(f_header),
2090 err = do_write(fd, &f_header, sizeof(f_header));
2091 if (err < 0) {
2092 pr_debug("failed to write perf pipe header\n");
2093 return err;
2096 return 0;
2099 int perf_session__write_header(struct perf_session *session,
2100 struct perf_evlist *evlist,
2101 int fd, bool at_exit)
2103 struct perf_file_header f_header;
2104 struct perf_file_attr f_attr;
2105 struct perf_header *header = &session->header;
2106 struct perf_evsel *evsel;
2107 u64 attr_offset;
2108 int err;
2110 lseek(fd, sizeof(f_header), SEEK_SET);
2112 evlist__for_each(session->evlist, evsel) {
2113 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2114 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2115 if (err < 0) {
2116 pr_debug("failed to write perf header\n");
2117 return err;
2121 attr_offset = lseek(fd, 0, SEEK_CUR);
2123 evlist__for_each(evlist, evsel) {
2124 f_attr = (struct perf_file_attr){
2125 .attr = evsel->attr,
2126 .ids = {
2127 .offset = evsel->id_offset,
2128 .size = evsel->ids * sizeof(u64),
2131 err = do_write(fd, &f_attr, sizeof(f_attr));
2132 if (err < 0) {
2133 pr_debug("failed to write perf header attribute\n");
2134 return err;
2138 if (!header->data_offset)
2139 header->data_offset = lseek(fd, 0, SEEK_CUR);
2140 header->feat_offset = header->data_offset + header->data_size;
2142 if (at_exit) {
2143 err = perf_header__adds_write(header, evlist, fd);
2144 if (err < 0)
2145 return err;
2148 f_header = (struct perf_file_header){
2149 .magic = PERF_MAGIC,
2150 .size = sizeof(f_header),
2151 .attr_size = sizeof(f_attr),
2152 .attrs = {
2153 .offset = attr_offset,
2154 .size = evlist->nr_entries * sizeof(f_attr),
2156 .data = {
2157 .offset = header->data_offset,
2158 .size = header->data_size,
2160 /* event_types is ignored, store zeros */
2163 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2165 lseek(fd, 0, SEEK_SET);
2166 err = do_write(fd, &f_header, sizeof(f_header));
2167 if (err < 0) {
2168 pr_debug("failed to write perf header\n");
2169 return err;
2171 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2173 return 0;
2176 static int perf_header__getbuffer64(struct perf_header *header,
2177 int fd, void *buf, size_t size)
2179 if (readn(fd, buf, size) <= 0)
2180 return -1;
2182 if (header->needs_swap)
2183 mem_bswap_64(buf, size);
2185 return 0;
2188 int perf_header__process_sections(struct perf_header *header, int fd,
2189 void *data,
2190 int (*process)(struct perf_file_section *section,
2191 struct perf_header *ph,
2192 int feat, int fd, void *data))
2194 struct perf_file_section *feat_sec, *sec;
2195 int nr_sections;
2196 int sec_size;
2197 int feat;
2198 int err;
2200 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2201 if (!nr_sections)
2202 return 0;
2204 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2205 if (!feat_sec)
2206 return -1;
2208 sec_size = sizeof(*feat_sec) * nr_sections;
2210 lseek(fd, header->feat_offset, SEEK_SET);
2212 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2213 if (err < 0)
2214 goto out_free;
2216 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2217 err = process(sec++, header, feat, fd, data);
2218 if (err < 0)
2219 goto out_free;
2221 err = 0;
2222 out_free:
2223 free(feat_sec);
2224 return err;
2227 static const int attr_file_abi_sizes[] = {
2228 [0] = PERF_ATTR_SIZE_VER0,
2229 [1] = PERF_ATTR_SIZE_VER1,
2230 [2] = PERF_ATTR_SIZE_VER2,
2231 [3] = PERF_ATTR_SIZE_VER3,
2232 [4] = PERF_ATTR_SIZE_VER4,
2236 /*
2237  * In the legacy file format, the magic number is not used to encode endianness.
2238  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2239  * on ABI revisions, we need to try all known sizes in both byte orders to
2240  * detect the endianness.
2241  */
2242 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2244 uint64_t ref_size, attr_size;
2245 int i;
2247 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2248 ref_size = attr_file_abi_sizes[i]
2249 + sizeof(struct perf_file_section);
2250 if (hdr_sz != ref_size) {
2251 attr_size = bswap_64(hdr_sz);
2252 if (attr_size != ref_size)
2253 continue;
2255 ph->needs_swap = true;
2257 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2259 ph->needs_swap);
2260 return 0;
2262 /* could not determine endianness */
2263 return -1;
2266 #define PERF_PIPE_HDR_VER0 16
2268 static const size_t attr_pipe_abi_sizes[] = {
2269 [0] = PERF_PIPE_HDR_VER0,
2273 /*
2274  * In the legacy pipe format, there is an implicit assumption that the
2275  * endianness of the host recording the samples and of the host parsing
2276  * the samples is the same. This is not always the case, given that the
2277  * pipe output may be redirected into a file and analyzed on a different
2278  * machine with possibly a different endianness and perf_event ABI revision in the perf tool itself.
2279  */
2280 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2282 u64 attr_size;
2283 int i;
2285 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2286 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2287 attr_size = bswap_64(hdr_sz);
2288 if (attr_size != hdr_sz)
2289 continue;
2291 ph->needs_swap = true;
2293 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2294 return 0;
2296 return -1;
2299 bool is_perf_magic(u64 magic)
2301 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2302 || magic == __perf_magic2
2303 || magic == __perf_magic2_sw)
2304 return true;
2306 return false;
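/*
 * Distinguish the legacy "PERFFILE" magic, where endianness has to be
 * inferred from the header size, from the "PERFILE2" magic, which encodes
 * endianness directly: seeing the byte-swapped constant means the file was
 * written on a host with the opposite byte order, so set ph->needs_swap.
 */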
2309 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2310 bool is_pipe, struct perf_header *ph)
2312 int ret;
2314 /* check for legacy format */
2315 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2316 if (ret == 0) {
2317 ph->version = PERF_HEADER_VERSION_1;
2318 pr_debug("legacy perf.data format\n");
2319 if (is_pipe)
2320 return try_all_pipe_abis(hdr_sz, ph);
2322 return try_all_file_abis(hdr_sz, ph);
2324 /*
2325  * the new magic number serves two purposes:
2326  * - unique number to identify actual perf.data files
2327  * - encode endianness of file
2328  */
2329 ph->version = PERF_HEADER_VERSION_2;
2331 /* check magic number with one endianness */
2332 if (magic == __perf_magic2)
2333 return 0;
2335 /* check magic number with opposite endianness */
2336 if (magic != __perf_magic2_sw)
2337 return -1;
2339 ph->needs_swap = true;
2341 return 0;
2344 int perf_file_header__read(struct perf_file_header *header,
2345 struct perf_header *ph, int fd)
2347 ssize_t ret;
2349 lseek(fd, 0, SEEK_SET);
2351 ret = readn(fd, header, sizeof(*header));
2352 if (ret <= 0)
2353 return -1;
2355 if (check_magic_endian(header->magic,
2356 header->attr_size, false, ph) < 0) {
2357 pr_debug("magic/endian check failed\n");
2358 return -1;
2361 if (ph->needs_swap) {
2362 mem_bswap_64(header, offsetof(struct perf_file_header,
2363 adds_features));
2366 if (header->size != sizeof(*header)) {
2367 /* Support the previous format */
2368 if (header->size == offsetof(typeof(*header), adds_features))
2369 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2370 else
2371 return -1;
2372 } else if (ph->needs_swap) {
2373 /*
2374  * feature bitmap is declared as an array of unsigned longs --
2375  * not good since its size can differ between the host that
2376  * generated the data file and the host analyzing the file.
2377  *
2378  * We need to handle endianness, but we don't know the size of
2379  * the unsigned long where the file was generated. Take a best
2380  * guess at determining it: try 64-bit swap first (i.e., file
2381  * created on a 64-bit host), and check if the hostname feature
2382  * bit is set (this feature bit is forced on as of fbe96f2).
2383  * If the bit is not, undo the 64-bit swap and try a 32-bit
2384  * swap. If the hostname bit is still not set (e.g., older data
2385  * file), punt and fall back to the original behavior --
2386  * clearing all feature bits and setting buildid.
2387  */
2388 mem_bswap_64(&header->adds_features,
2389 BITS_TO_U64(HEADER_FEAT_BITS));
2391 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2392 /* unswap as u64 */
2393 mem_bswap_64(&header->adds_features,
2394 BITS_TO_U64(HEADER_FEAT_BITS));
2396 /* unswap as u32 */
2397 mem_bswap_32(&header->adds_features,
2398 BITS_TO_U32(HEADER_FEAT_BITS));
2401 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2402 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2403 set_bit(HEADER_BUILD_ID, header->adds_features);
2407 memcpy(&ph->adds_features, &header->adds_features,
2408 sizeof(ph->adds_features));
2410 ph->data_offset = header->data.offset;
2411 ph->data_size = header->data.size;
2412 ph->feat_offset = header->data.offset + header->data.size;
2413 return 0;
2416 static int perf_file_section__process(struct perf_file_section *section,
2417 struct perf_header *ph,
2418 int feat, int fd, void *data)
2420 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2421 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2422 "%d, continuing...\n", section->offset, feat);
2423 return 0;
2426 if (feat >= HEADER_LAST_FEATURE) {
2427 pr_debug("unknown feature %d, continuing...\n", feat);
2428 return 0;
2431 if (!feat_ops[feat].process)
2432 return 0;
2434 return feat_ops[feat].process(section, ph, fd, data);
2437 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2438 struct perf_header *ph, int fd,
2439 bool repipe)
2441 ssize_t ret;
2443 ret = readn(fd, header, sizeof(*header));
2444 if (ret <= 0)
2445 return -1;
2447 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2448 pr_debug("endian/magic failed\n");
2449 return -1;
2452 if (ph->needs_swap)
2453 header->size = bswap_64(header->size);
2455 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2456 return -1;
2458 return 0;
2461 static int perf_header__read_pipe(struct perf_session *session)
2463 struct perf_header *header = &session->header;
2464 struct perf_pipe_file_header f_header;
2466 if (perf_file_header__read_pipe(&f_header, header,
2467 perf_data_file__fd(session->file),
2468 session->repipe) < 0) {
2469 pr_debug("incompatible file format\n");
2470 return -EINVAL;
2473 return 0;
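/*
 * Read one on-file perf_event_attr: the guaranteed ABI0-sized prefix first,
 * then whatever extra bytes the file's attr->size advertises (rejecting
 * sizes newer than this tool knows about), and finally the perf_file_section
 * that locates the event ids, which the caller reads.
 */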
2476 static int read_attr(int fd, struct perf_header *ph,
2477 struct perf_file_attr *f_attr)
2479 struct perf_event_attr *attr = &f_attr->attr;
2480 size_t sz, left;
2481 size_t our_sz = sizeof(f_attr->attr);
2482 ssize_t ret;
2484 memset(f_attr, 0, sizeof(*f_attr));
2486 /* read minimal guaranteed structure */
2487 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2488 if (ret <= 0) {
2489 pr_debug("cannot read %d bytes of header attr\n",
2490 PERF_ATTR_SIZE_VER0);
2491 		return -1;
2492 	}
2494 	/* on file perf_event_attr size */
2495 	sz = attr->size;
2497 	if (ph->needs_swap)
2498 		sz = bswap_32(sz);
2500 	if (sz == 0) {
2501 		/* assume ABI0 */
2502 		sz = PERF_ATTR_SIZE_VER0;
2503 	} else if (sz > our_sz) {
2504 		pr_debug("file uses a more recent and unsupported ABI"
2505 			 " (%zu bytes extra)\n", sz - our_sz);
2506 		return -1;
2507 	}
2508 	/* what we have not yet read and that we know about */
2509 	left = sz - PERF_ATTR_SIZE_VER0;
2510 	if (left) {
2511 		void *ptr = attr;
2512 		ptr += PERF_ATTR_SIZE_VER0;
2514 		ret = readn(fd, ptr, left);
2515 	}
2516 	/* read perf_file_section, ids are read in caller */
2517 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2519 	return ret <= 0 ? -1 : 0;
2520 }
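/*
 * Editor's note -- a standalone sketch (not header.c code) of the pattern
 * read_attr() uses: read the smallest guaranteed prefix of a versioned
 * struct, look at the on-file size field, then copy only the extra bytes the
 * reader also understands.  The struct, field names and sizes are made up.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct sketch_attr_v1 {
	uint32_t size;		/* on-file size of the record, like attr->size */
	uint32_t type;
	uint64_t config;	/* end of the hypothetical "VER0" part */
	uint64_t sample_freq;	/* added by a later ABI revision */
};

#define SKETCH_SIZE_VER0	offsetof(struct sketch_attr_v1, sample_freq)

/* Parse one record from buf; returns bytes consumed or -1. */
static long sketch_read_attr(const unsigned char *buf, size_t buf_len,
			     struct sketch_attr_v1 *out)
{
	uint32_t sz;

	if (buf_len < SKETCH_SIZE_VER0)
		return -1;

	memset(out, 0, sizeof(*out));
	memcpy(out, buf, SKETCH_SIZE_VER0);	/* minimal guaranteed part */

	sz = out->size;
	if (sz == 0)				/* oldest ABI: size not filled in */
		sz = SKETCH_SIZE_VER0;
	else if (sz > sizeof(*out) || sz > buf_len)
		return -1;			/* newer producer than reader */

	if (sz > SKETCH_SIZE_VER0)		/* copy only the part we know */
		memcpy((unsigned char *)out + SKETCH_SIZE_VER0,
		       buf + SKETCH_SIZE_VER0, sz - SKETCH_SIZE_VER0);

	return sz;
}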
2522 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2523 struct pevent *pevent)
2525 struct event_format *event;
2526 char bf[128];
2528 /* already prepared */
2529 if (evsel->tp_format)
2530 return 0;
2532 if (pevent == NULL) {
2533 pr_debug("broken or missing trace data\n");
2534 		return -1;
2535 	}
2537 	event = pevent_find_event(pevent, evsel->attr.config);
2538 if (event == NULL)
2539 return -1;
2541 if (!evsel->name) {
2542 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2543 evsel->name = strdup(bf);
2544 if (evsel->name == NULL)
2545 			return -1;
2546 	}
2548 	evsel->tp_format = event;
2549 	return 0;
2550 }
2552 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2553 struct pevent *pevent)
2555 struct perf_evsel *pos;
2557 evlist__for_each(evlist, pos) {
2558 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2559 perf_evsel__prepare_tracepoint_event(pos, pevent))
2560 			return -1;
2561 	}
2563 	return 0;
2564 }
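/*
 * Editor's note -- a standalone sketch (not header.c code) of the lazy
 * preparation done in perf_evsel__prepare_tracepoint_event() above: resolve a
 * tracepoint's "system:name" label once, cache it, and reuse it on later
 * calls.  The structure below is a stand-in for the pevent/evsel objects.
 */
#include <stdio.h>
#include <string.h>

struct sketch_tp {
	const char *system;
	const char *name;
	char *cached;		/* "system:name", built on first use */
};

static const char *sketch_tp_label(struct sketch_tp *tp)
{
	char buf[128];

	if (tp->cached)		/* already prepared */
		return tp->cached;

	snprintf(buf, sizeof(buf), "%s:%s", tp->system, tp->name);
	tp->cached = strdup(buf);
	return tp->cached;	/* may be NULL if strdup failed */
}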
2566 int perf_session__read_header(struct perf_session *session)
2568 struct perf_data_file *file = session->file;
2569 struct perf_header *header = &session->header;
2570 struct perf_file_header f_header;
2571 struct perf_file_attr f_attr;
2572 u64 f_id;
2573 int nr_attrs, nr_ids, i, j;
2574 int fd = perf_data_file__fd(file);
2576 session->evlist = perf_evlist__new();
2577 if (session->evlist == NULL)
2578 return -ENOMEM;
2580 session->evlist->env = &header->env;
2581 session->machines.host.env = &header->env;
2582 if (perf_data_file__is_pipe(file))
2583 return perf_header__read_pipe(session);
2585 if (perf_file_header__read(&f_header, header, fd) < 0)
2586 		return -EINVAL;
2588 	/*
2589 	 * Sanity check that perf.data was written cleanly; data size is
2590 	 * initialized to 0 and updated only if the on_exit function is run.
2591 	 * If data size is still 0, then the file contains only partial
2592 	 * information.  Just warn the user and process as much as possible.
2593 	 */
2594 	if (f_header.data.size == 0) {
2595 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2596 "Was the 'perf record' command properly terminated?\n",
2597 			   file->path);
2598 	}
2600 	nr_attrs = f_header.attrs.size / f_header.attr_size;
2601 lseek(fd, f_header.attrs.offset, SEEK_SET);
2603 for (i = 0; i < nr_attrs; i++) {
2604 struct perf_evsel *evsel;
2605 off_t tmp;
2607 if (read_attr(fd, header, &f_attr) < 0)
2608 goto out_errno;
2610 if (header->needs_swap) {
2611 f_attr.ids.size = bswap_64(f_attr.ids.size);
2612 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2613 			perf_event__attr_swap(&f_attr.attr);
2614 		}
2616 		tmp = lseek(fd, 0, SEEK_CUR);
2617 evsel = perf_evsel__new(&f_attr.attr);
2619 if (evsel == NULL)
2620 goto out_delete_evlist;
2622 		evsel->needs_swap = header->needs_swap;
2623 		/*
2624 		 * Do it before so that if perf_evsel__alloc_id fails, this
2625 		 * entry gets purged too at perf_evlist__delete().
2626 		 */
2627 		perf_evlist__add(session->evlist, evsel);
2629 		nr_ids = f_attr.ids.size / sizeof(u64);
2630 		/*
2631 		 * We don't have the cpu and thread maps on the header, so
2632 		 * for allocating the perf_sample_id table we fake 1 cpu and
2633 		 * hattr->ids threads.
2634 		 */
2635 		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2636 goto out_delete_evlist;
2638 lseek(fd, f_attr.ids.offset, SEEK_SET);
2640 for (j = 0; j < nr_ids; j++) {
2641 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2642 goto out_errno;
2644 			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2645 		}
2647 		lseek(fd, tmp, SEEK_SET);
2648 	}
2650 	symbol_conf.nr_events = nr_attrs;
2652 perf_header__process_sections(header, fd, &session->tevent,
2653 perf_file_section__process);
2655 if (perf_evlist__prepare_tracepoint_events(session->evlist,
2656 session->tevent.pevent))
2657 goto out_delete_evlist;
2659 return 0;
2660 out_errno:
2661 return -errno;
2663 out_delete_evlist:
2664 perf_evlist__delete(session->evlist);
2665 session->evlist = NULL;
2666 	return -ENOMEM;
2667 }
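/*
 * Editor's note -- a standalone sketch (not header.c code) of the seek
 * discipline used in the attr loop above: remember the current position in
 * the attr table, jump out to the per-event id array described by an
 * {offset, size} section, then seek back and continue with the next attr.
 * The helper name and record layout are illustrative only.
 */
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

struct sketch_file_section {
	uint64_t offset;	/* file offset of the id array */
	uint64_t size;		/* array size in bytes; count = size / 8 */
};

static int sketch_read_ids(int fd, const struct sketch_file_section *ids)
{
	uint64_t nr = ids->size / sizeof(uint64_t);
	off_t saved = lseek(fd, 0, SEEK_CUR);	/* position inside attr table */
	uint64_t id;

	if (saved == (off_t)-1 ||
	    lseek(fd, (off_t)ids->offset, SEEK_SET) == (off_t)-1)
		return -1;

	for (uint64_t i = 0; i < nr; i++) {
		if (read(fd, &id, sizeof(id)) != sizeof(id))
			return -1;
		/* a real reader would register the (event, id) pair here */
	}

	/* resume where we left off in the attr table */
	return lseek(fd, saved, SEEK_SET) == (off_t)-1 ? -1 : 0;
}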
2669 int perf_event__synthesize_attr(struct perf_tool *tool,
2670 struct perf_event_attr *attr, u32 ids, u64 *id,
2671 perf_event__handler_t process)
2673 union perf_event *ev;
2674 size_t size;
2675 int err;
2677 size = sizeof(struct perf_event_attr);
2678 size = PERF_ALIGN(size, sizeof(u64));
2679 size += sizeof(struct perf_event_header);
2680 size += ids * sizeof(u64);
2682 ev = malloc(size);
2684 if (ev == NULL)
2685 return -ENOMEM;
2687 ev->attr.attr = *attr;
2688 memcpy(ev->attr.id, id, ids * sizeof(u64));
2690 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2691 ev->attr.header.size = (u16)size;
2693 if (ev->attr.header.size == size)
2694 err = process(tool, ev, NULL, NULL);
2695 else
2696 err = -E2BIG;
2698 free(ev);
2700 	return err;
2701 }
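/*
 * Editor's note -- a standalone sketch (not header.c code) of the sizing done
 * above: an attr record is a fixed event header plus a u64-aligned attr
 * payload plus a trailing array of u64 ids, and because the on-wire size
 * field is only 16 bits wide, the computed size must be checked for
 * truncation (that is what the -E2BIG path guards against).  The header
 * layout and names here are illustrative.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct sketch_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;		/* u16 on the wire, hence the overflow check */
};

/* Returns the total record size, or 0 if it cannot be encoded in a u16. */
static size_t sketch_attr_record_size(size_t attr_size, uint32_t nr_ids)
{
	size_t size = SKETCH_ALIGN(attr_size, sizeof(uint64_t));

	size += sizeof(struct sketch_event_header);
	size += (size_t)nr_ids * sizeof(uint64_t);

	return size == (uint16_t)size ? size : 0;
}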
2703 static struct event_update_event *
2704 event_update_event__new(size_t size, u64 type, u64 id)
2706 struct event_update_event *ev;
2708 size += sizeof(*ev);
2709 size = PERF_ALIGN(size, sizeof(u64));
2711 ev = zalloc(size);
2712 if (ev) {
2713 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2714 ev->header.size = (u16)size;
2715 ev->type = type;
2716 		ev->id = id;
2717 	}
2718 	return ev;
2719 }
2721 static int
2722 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2723 struct perf_evsel *evsel,
2724 perf_event__handler_t process)
2726 struct event_update_event *ev;
2727 size_t size = strlen(evsel->unit);
2728 int err;
2730 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
2731 if (ev == NULL)
2732 return -ENOMEM;
2734 strncpy(ev->data, evsel->unit, size);
2735 err = process(tool, (union perf_event *)ev, NULL, NULL);
2736 free(ev);
2737 	return err;
2738 }
2740 static int
2741 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
2742 struct perf_evsel *evsel,
2743 perf_event__handler_t process)
2745 struct event_update_event *ev;
2746 struct event_update_event_scale *ev_data;
2747 int err;
2749 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
2750 if (ev == NULL)
2751 return -ENOMEM;
2753 ev_data = (struct event_update_event_scale *) ev->data;
2754 ev_data->scale = evsel->scale;
2755 err = process(tool, (union perf_event*) ev, NULL, NULL);
2756 free(ev);
2757 	return err;
2758 }
2760 static int
2761 perf_event__synthesize_event_update_name(struct perf_tool *tool,
2762 struct perf_evsel *evsel,
2763 perf_event__handler_t process)
2765 struct event_update_event *ev;
2766 size_t len = strlen(evsel->name);
2767 int err;
2769 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
2770 if (ev == NULL)
2771 return -ENOMEM;
2773 strncpy(ev->data, evsel->name, len);
2774 err = process(tool, (union perf_event*) ev, NULL, NULL);
2775 free(ev);
2776 	return err;
2777 }
2779 static int
2780 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
2781 struct perf_evsel *evsel,
2782 perf_event__handler_t process)
2784 size_t size = sizeof(struct event_update_event);
2785 struct event_update_event *ev;
2786 int max, err;
2787 u16 type;
2789 if (!evsel->own_cpus)
2790 return 0;
2792 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
2793 if (!ev)
2794 return -ENOMEM;
2796 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2797 ev->header.size = (u16)size;
2798 ev->type = PERF_EVENT_UPDATE__CPUS;
2799 ev->id = evsel->id[0];
2801 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
2802 evsel->own_cpus,
2803 type, max);
2805 err = process(tool, (union perf_event*) ev, NULL, NULL);
2806 free(ev);
2807 	return err;
2808 }
2810 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
2812 struct event_update_event *ev = &event->event_update;
2813 struct event_update_event_scale *ev_scale;
2814 struct event_update_event_cpus *ev_cpus;
2815 struct cpu_map *map;
2816 size_t ret;
2818 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
2820 switch (ev->type) {
2821 case PERF_EVENT_UPDATE__SCALE:
2822 ev_scale = (struct event_update_event_scale *) ev->data;
2823 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
2824 break;
2825 case PERF_EVENT_UPDATE__UNIT:
2826 ret += fprintf(fp, "... unit: %s\n", ev->data);
2827 break;
2828 case PERF_EVENT_UPDATE__NAME:
2829 ret += fprintf(fp, "... name: %s\n", ev->data);
2830 break;
2831 case PERF_EVENT_UPDATE__CPUS:
2832 ev_cpus = (struct event_update_event_cpus *) ev->data;
2833 ret += fprintf(fp, "... ");
2835 map = cpu_map__new_data(&ev_cpus->cpus);
2836 if (map)
2837 ret += cpu_map__fprintf(map, fp);
2838 else
2839 ret += fprintf(fp, "failed to get cpus\n");
2840 break;
2841 default:
2842 ret += fprintf(fp, "... unknown type\n");
2843 		break;
2844 	}
2846 	return ret;
2847 }
2849 int perf_event__synthesize_attrs(struct perf_tool *tool,
2850 struct perf_session *session,
2851 perf_event__handler_t process)
2853 struct perf_evsel *evsel;
2854 int err = 0;
2856 evlist__for_each(session->evlist, evsel) {
2857 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
2858 evsel->id, process);
2859 if (err) {
2860 pr_debug("failed to create perf header attribute\n");
2861 			return err;
2862 		}
2863 	}
2865 	return err;
2866 }
2868 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
2869 union perf_event *event,
2870 struct perf_evlist **pevlist)
2872 u32 i, ids, n_ids;
2873 struct perf_evsel *evsel;
2874 struct perf_evlist *evlist = *pevlist;
2876 if (evlist == NULL) {
2877 *pevlist = evlist = perf_evlist__new();
2878 if (evlist == NULL)
2879 			return -ENOMEM;
2880 	}
2882 	evsel = perf_evsel__new(&event->attr.attr);
2883 if (evsel == NULL)
2884 return -ENOMEM;
2886 perf_evlist__add(evlist, evsel);
2888 ids = event->header.size;
2889 ids -= (void *)&event->attr.id - (void *)event;
2890 	n_ids = ids / sizeof(u64);
2891 	/*
2892 	 * We don't have the cpu and thread maps on the header, so
2893 	 * for allocating the perf_sample_id table we fake 1 cpu and
2894 	 * hattr->ids threads.
2895 	 */
2896 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
2897 return -ENOMEM;
2899 for (i = 0; i < n_ids; i++) {
2900 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2901 	}
2903 	symbol_conf.nr_events = evlist->nr_entries;
2905 	return 0;
2906 }
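/*
 * Editor's note -- a standalone sketch (not header.c code) of the arithmetic
 * used above to recover the id count from a variable-sized record: the record
 * header carries the total size, so the number of trailing u64 ids is
 * (total size - offset of the id array) / sizeof(u64).  The record layout
 * below is illustrative, not the real attr event layout.
 */
#include <stdint.h>
#include <stddef.h>

struct sketch_attr_event {
	uint16_t header_size;	/* total record size, header included */
	uint16_t fixed_part;	/* stands in for the embedded perf_event_attr */
	uint64_t id[];		/* trailing array, length implied by size */
};

static size_t sketch_nr_ids(const struct sketch_attr_event *ev)
{
	size_t ids_bytes = ev->header_size -
			   offsetof(struct sketch_attr_event, id);

	return ids_bytes / sizeof(uint64_t);
}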
2908 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
2909 union perf_event *event,
2910 struct perf_evlist **pevlist)
2912 struct event_update_event *ev = &event->event_update;
2913 struct event_update_event_scale *ev_scale;
2914 struct event_update_event_cpus *ev_cpus;
2915 struct perf_evlist *evlist;
2916 struct perf_evsel *evsel;
2917 struct cpu_map *map;
2919 if (!pevlist || *pevlist == NULL)
2920 return -EINVAL;
2922 evlist = *pevlist;
2924 evsel = perf_evlist__id2evsel(evlist, ev->id);
2925 if (evsel == NULL)
2926 return -EINVAL;
2928 switch (ev->type) {
2929 case PERF_EVENT_UPDATE__UNIT:
2930 evsel->unit = strdup(ev->data);
2931 break;
2932 case PERF_EVENT_UPDATE__NAME:
2933 evsel->name = strdup(ev->data);
2934 break;
2935 	case PERF_EVENT_UPDATE__SCALE:
2936 		ev_scale = (struct event_update_event_scale *) ev->data;
2937 		evsel->scale = ev_scale->scale;
		break;		/* don't fall through and misread the scale
				 * payload as a cpu map in the CPUS case */
2938 	case PERF_EVENT_UPDATE__CPUS:
2939 		ev_cpus = (struct event_update_event_cpus *) ev->data;
2941 		map = cpu_map__new_data(&ev_cpus->cpus);
2942 		if (map)
2943 			evsel->own_cpus = map;
2944 		else
2945 			pr_err("failed to get event_update cpus\n");
		break;
2946 	default:
2947 		break;
2948 	}
2950 	return 0;
2951 }
2953 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2954 struct perf_evlist *evlist,
2955 perf_event__handler_t process)
2957 union perf_event ev;
2958 struct tracing_data *tdata;
2959 ssize_t size = 0, aligned_size = 0, padding;
2960 	int err __maybe_unused = 0;
2962 	/*
2963 	 * We are going to store the size of the data followed
2964 	 * by the data contents. Since the fd descriptor is a pipe,
2965 	 * we cannot seek back to store the size of the data once
2966 	 * we know it. Instead we:
2967 	 *
2968 	 * - write the tracing data to the temp file
2969 	 * - get/write the data size to pipe
2970 	 * - write the tracing data from the temp file
2971 	 *   to the pipe
2972 	 */
2973 	tdata = tracing_data_get(&evlist->entries, fd, true);
2974 if (!tdata)
2975 return -1;
2977 memset(&ev, 0, sizeof(ev));
2979 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2980 size = tdata->size;
2981 aligned_size = PERF_ALIGN(size, sizeof(u64));
2982 padding = aligned_size - size;
2983 ev.tracing_data.header.size = sizeof(ev.tracing_data);
2984 ev.tracing_data.size = aligned_size;
2986 process(tool, &ev, NULL, NULL);
2988 	/*
2989 	 * The put function will copy all the tracing data
2990 	 * stored in temp file to the pipe.
2991 	 */
2992 	tracing_data_put(tdata);
2994 write_padded(fd, NULL, 0, padding);
2996 	return aligned_size;
2997 }
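/*
 * Editor's note -- a standalone sketch (not header.c code) of the framing the
 * comment above describes: a pipe cannot be rewound, so the producer announces
 * the payload size up front, rounded up to an 8-byte boundary, and then pads
 * the payload itself to that aligned size so the consumer keeps reading
 * u64-aligned records.  Helper names are illustrative.
 */
#include <stdint.h>
#include <unistd.h>

#define SKETCH_ALIGN8(x)	(((x) + 7u) & ~(uint64_t)7)

static int sketch_write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;

	while (len) {
		ssize_t n = write(fd, p, len);

		if (n <= 0)
			return -1;
		p += n;
		len -= (size_t)n;
	}
	return 0;
}

/* Send "aligned size, then payload padded to 8 bytes" down a pipe. */
static int sketch_send_blob(int fd, const void *payload, uint64_t size)
{
	static const char zeros[8];
	uint64_t aligned = SKETCH_ALIGN8(size);

	if (sketch_write_all(fd, &aligned, sizeof(aligned)) ||	/* announced size */
	    sketch_write_all(fd, payload, size) ||		/* payload */
	    sketch_write_all(fd, zeros, aligned - size))	/* alignment padding */
		return -1;
	return 0;
}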
2999 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3000 union perf_event *event,
3001 struct perf_session *session)
3003 ssize_t size_read, padding, size = event->tracing_data.size;
3004 int fd = perf_data_file__fd(session->file);
3005 off_t offset = lseek(fd, 0, SEEK_CUR);
3006 char buf[BUFSIZ];
3008 /* setup for reading amidst mmap */
3009 lseek(fd, offset + sizeof(struct tracing_data_event),
3010 SEEK_SET);
3012 size_read = trace_report(fd, &session->tevent,
3013 session->repipe);
3014 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3016 if (readn(fd, buf, padding) < 0) {
3017 pr_err("%s: reading input file", __func__);
3018 		return -1;
3019 	}
3020 	if (session->repipe) {
3021 		int retw = write(STDOUT_FILENO, buf, padding);
3022 		if (retw <= 0 || retw != padding) {
3023 			pr_err("%s: repiping tracing data padding", __func__);
3024 			return -1;
3025 		}
3026 	}
3028 	if (size_read + padding != size) {
3029 pr_err("%s: tracing data size mismatch", __func__);
3030 		return -1;
3031 	}
3033 	perf_evlist__prepare_tracepoint_events(session->evlist,
3034 					       session->tevent.pevent);
3036 	return size_read + padding;
3037 }
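/*
 * Editor's note -- a standalone sketch (not header.c code) of the consumer
 * side of that framing: after reading a payload of size_read bytes, drain the
 * 0-7 bytes of u64 alignment padding and check that payload plus padding
 * matches the size announced in the event.  Names are illustrative.
 */
#include <stdint.h>
#include <unistd.h>

static int sketch_drain_padding(int fd, uint64_t size_read, uint64_t announced)
{
	char pad[8];
	uint64_t padding = ((size_read + 7) & ~(uint64_t)7) - size_read;

	/* a short read is tolerated here only because this is a sketch */
	if (padding && read(fd, pad, padding) != (ssize_t)padding)
		return -1;

	return size_read + padding == announced ? 0 : -1;
}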
3039 int perf_event__synthesize_build_id(struct perf_tool *tool,
3040 struct dso *pos, u16 misc,
3041 perf_event__handler_t process,
3042 struct machine *machine)
3044 union perf_event ev;
3045 size_t len;
3046 int err = 0;
3048 if (!pos->hit)
3049 return err;
3051 memset(&ev, 0, sizeof(ev));
3053 len = pos->long_name_len + 1;
3054 len = PERF_ALIGN(len, NAME_ALIGN);
3055 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3056 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3057 ev.build_id.header.misc = misc;
3058 ev.build_id.pid = machine->pid;
3059 ev.build_id.header.size = sizeof(ev.build_id) + len;
3060 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3062 err = process(tool, &ev, NULL, machine);
3064 	return err;
3065 }
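/*
 * Editor's note -- a standalone sketch (not header.c code) of how the
 * build-id record above is sized: a fixed part plus the filename rounded up
 * to a NAME_ALIGN boundary (assumed 64 bytes here), with the total stored in
 * the record so a reader can skip records it does not understand.  The struct
 * below is illustrative, not the real build_id_event layout.
 */
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#define SKETCH_NAME_ALIGN	64
#define SKETCH_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct sketch_build_id_event {
	uint16_t size;			/* total record size */
	uint8_t  build_id[20];
	char	 filename[];		/* NUL terminated, NAME_ALIGN padded */
};

static struct sketch_build_id_event *
sketch_build_id_event__new(const uint8_t *build_id, const char *filename)
{
	size_t len = SKETCH_ALIGN_UP(strlen(filename) + 1, SKETCH_NAME_ALIGN);
	struct sketch_build_id_event *ev = calloc(1, sizeof(*ev) + len);

	if (!ev)
		return NULL;

	memcpy(ev->build_id, build_id, sizeof(ev->build_id));
	memcpy(ev->filename, filename, strlen(filename));	/* calloc keeps the NUL */
	ev->size = (uint16_t)(sizeof(*ev) + len);
	return ev;
}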
3067 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3068 union perf_event *event,
3069 struct perf_session *session)
3071 __event_process_build_id(&event->build_id,
3072 event->build_id.filename,
3073 session);
3074 return 0;