/* tools/perf/util/header.c */
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

char *perf_header__find_event(u64 id)
{
	int i;

	for (i = 0; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC	(*(u64 *)__perf_magic)

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)	\
		if (!pos->has_build_id)		\
			continue;		\
		else
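/*
 * On-disk format of the HEADER_BUILD_ID feature section: a sequence of
 * struct build_id_event records (perf_event_header + pid + build_id),
 * each followed by the DSO's long name, NUL terminated and padded out
 * to a NAME_ALIGN (64 byte) boundary; header.size covers the record
 * plus the padded name, as written by __dsos__write_buildid_table().
 */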
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}

static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
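/*
 * The build-id cache under 'debugdir' keeps one copy (or hard link) of
 * each object at <debugdir>/<object path>/<sbuild_id>, plus a relative
 * symlink at <debugdir>/.build-id/<first two hex chars>/<remaining hex>
 * pointing back at it via "../..". Kallsyms is special cased: its name
 * is not a real path, so /proc/kallsyms is copied instead of linked.
 */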
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = malloc(size),
	     *linkname = malloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms)
		realname = (char *)name;
	else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	/* filename is heap allocated, so bound by its size, not sizeof() */
	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size);
	ssize_t len;
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	len = readlink(linkname, filename, size - 1);
	if (len < 0)
		goto out_free;
	filename[len] = '\0';	/* readlink() does not NUL terminate */

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);

	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);

	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}
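/*
 * Optional features live after the sample data: a table of struct
 * perf_file_section { offset, size } entries (one per set bit in
 * adds_features) sits at data_offset + data_size, followed by the
 * payload of each enabled feature (tracing data, build-id table). The
 * payloads are written first and the section table is filled in last.
 */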
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec;
	int sec_size;
	u64 sec_start;
	int idx = 0, err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
		struct perf_file_section *trace_sec;

		trace_sec = &feat_sec[idx++];

		/* Write trace info */
		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
		read_tracing_data(fd, &evlist->entries);
		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
	}

	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
		struct perf_file_section *buildid_sec;

		buildid_sec = &feat_sec[idx++];

		/* Write build-ids */
		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
		err = dsos__write_buildid_table(header, fd);
		if (err < 0) {
			pr_debug("failed to write buildid table\n");
			goto out_free;
		}
		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
					  buildid_sec->offset;
		if (!no_buildid_cache)
			perf_session__cache_build_ids(session);
	}

	lseek(fd, sec_start, SEEK_SET);
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}
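/*
 * perf.data layout as written below: the per-evsel id arrays right
 * after the (yet to be written) file header, then the table of struct
 * perf_file_attr entries, then the saved event type table, then the
 * sample data and, when called at exit, the feature sections. The
 * struct perf_file_header itself is written last, at offset 0.
 */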
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}
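/*
 * Cross-endian detection: attr_size in the on-disk header must equal
 * sizeof(struct perf_file_attr). If it only matches after bswap_64(),
 * the file was written on a machine of the opposite byte order, so
 * needs_swap is set and the scalar header fields are swapped in place.
 */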
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));
	/*
	 * FIXME: hack that assumes that if we need swap the perf.data file
	 * may be coming from an arch with a different word-size, ergo different
	 * DEFINE_BITMAP format, investigate more later, but for now it's mostly
	 * safe to assume that we have a build-id section. Trace files probably
	 * have several other issues in this realm anyway...
	 */
	if (ph->needs_swap) {
		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
		perf_header__set_feat(ph, HEADER_BUILD_ID);
	}

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}
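/*
 * Older perf.data files carry build_id_event records without the pid
 * field (see the a1645ce1 note in perf_header__read_build_ids()).
 * Reading such a record with the current, larger struct swallows the
 * first sizeof(pid_t) bytes of the filename, which is how the quirk is
 * detected; this helper re-reads the table using the old layout and
 * fakes pid = 0.
 */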
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;
		bev.pid	   = 0;
		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;
	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;

	return 0;
}
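/*
 * Reads a non-pipe perf.data file: the file header, then one struct
 * perf_file_attr plus its id array per evsel, then the saved event
 * type table, and finally the optional feature sections via
 * perf_header__process_sections().
 */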
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
			goto out_errno;

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;
	u64 type = 0;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (!type)
			type = pos->attr.sample_type;
		else if (type != pos->attr.sample_type)
			die("non matching sample_type");
	}

	return type;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	bool value = false, first = true;
	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (first) {
			value = pos->attr.sample_id_all;
			first = false;
		} else if (value != pos->attr.sample_id_all)
			die("non matching sample_id_all");
	}

	return value;
}
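/*
 * Synthesizes a PERF_RECORD_HEADER_ATTR event carrying one
 * perf_event_attr and its sample ids inline, so a consumer that never
 * sees the file header (e.g. the pipe-mode stream) can still rebuild
 * the evsel list via perf_event__process_attr().
 */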
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}

int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}

int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __used = 0;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = read_tracing_data_size(fd, &evlist->entries);
	if (size <= 0)
		return size;
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	err = read_tracing_data(fd, &evlist->entries);
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}

int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}

void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}