// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

/* /proc/<pid>/maps parse budget, in milliseconds; settable via --proc-map-timeout. */
unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		/* Only the cpumode carries real information here. */
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}
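/*
 * Most of the synthesizers below hand their finished event to the helper
 * above: the synthetic sample carries no measurement of its own, so every
 * field except the cpumode is left as "unknown" (-1) for the handler.
 */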
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name = skip_spaces(name + 5);  /* strlen("Name:") */
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}
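/*
 * Illustrative /proc/<pid>/status prefix the parser above relies on
 * (field order and tab spacing as produced by a typical Linux kernel):
 *
 *	Name:	cat
 *	...
 *	Tgid:	1234
 *	...
 *	PPid:	1000
 */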
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0)
			return -1;
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}
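/*
 * Worked example of the header.size arithmetic above, assuming the usual
 * 16-byte comm field of struct perf_record_comm and a zero id_hdr_size:
 * comm "cat" needs strlen + 1 = 4 bytes, PERF_ALIGN rounds that up to 8,
 * so header.size shrinks by the 8 unused comm bytes.
 */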
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * for main thread set parent to ppid from status file. For other
	 * threads set parent pid to main thread. ie., assume main thread
	 * spawns all threads in a process
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
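/*
 * Illustrative result of the ppid/ptid selection above: when synthesizing
 * tid 1235 of process 1234, tgid != pid, so both ppid and ptid become 1234
 * (the group leader); only the leader itself inherits the PPid: value that
 * was read from /proc.
 */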
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		/* len was parsed as the end address; mmap.start aliases mmap2.start in the union. */
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}
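/*
 * Illustrative parse of the maps line shown above:
 * "00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat" yields
 * start = 0x400000, len (still the end address at this point) = 0x40c000,
 * prot = "r-xp", pgoff = 0, maj:min = fd:01, ino = 41038 and
 * execname = "/bin/cat", so n == 8.  Anonymous maps stop at n == 7
 * because the pathname column is absent.
 */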
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	maps__for_each_entry(maps, pos) {
		size_t size;

		if (!__map__is_kmodule(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full, perf_event__handler_t process,
				      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_maps()
		 */
		if (pid == tgid &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       perf_thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(sizeof(*args), thread_nr);
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	/* the first m workers take one extra dirent each */
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}
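	/*
	 * Illustrative split: with n = 10 /proc entries and thread_nr = 4,
	 * num_per_thread = 2 and m = 2, so workers 0-1 take 3 entries each
	 * (starts 0 and 3) and workers 2-3 take 2 each (starts 6 and 8).
	 */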
	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return err;
}
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}
static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (map == NULL)
		return -1;

	kmap = map__kmap(map);
	if (!kmap->ref_reloc_sym)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct perf_thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
		char *comm = perf_thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = perf_thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct perf_cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}
static void synthesize_mask(struct perf_record_record_cpu_map *mask,
			    struct perf_cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}
static size_t cpus_size(struct perf_cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}
static size_t mask_size(struct perf_cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}
void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = perf_cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct perf_record_cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct perf_record_record_cpu_map' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct perf_record_cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct perf_record_cpu_map_data);
	*size = PERF_ALIGN(*size, sizeof(u64));
	return zalloc(*size);
}
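/*
 * Illustrative trade-off, assuming 2-byte cpu entries, an 8-byte aligned
 * struct perf_record_record_cpu_map and 8-byte longs: for CPUs {0,1,2,3},
 * cpus_size() is 2 + 4 * 2 = 10 bytes while mask_size() is 8 + 8 = 16,
 * so the array form wins; for a dense map of CPUs 0-255 the array needs
 * 514 bytes but the mask only 8 + 4 * 8 = 40, so the mask form wins.
 */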
void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
		break;
	default:
		break;
	}
}
static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
{
	size_t size = sizeof(struct perf_record_cpu_map);
	struct perf_record_cpu_map *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct perf_cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct perf_record_cpu_map *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct perf_record_stat_config *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct perf_record_stat event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_record_stat_round event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
	size_t sz, result = sizeof(struct perf_record_sample);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_AUX) {
		result += sizeof(u64);
		result += sample->aux_sample.size;
	}

	return result;
}
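/*
 * Illustrative sizing: for type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME the function above returns
 * sizeof(struct perf_record_sample) + 3 * sizeof(u64): one u64 for the
 * IP, one packing pid/tid as two u32s, and one for the timestamp.
 */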
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
				  const struct perf_sample *sample)
{
	__u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		sz = sample->aux_sample.size;
		*array++ = sz;
		memcpy(array, sample->aux_sample.data, sz);
		array = (void *)array + sz;
	}

	return 0;
}
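/*
 * Callers are expected to size the destination event with
 * perf_event__sample_event_size() for the same type/read_format pair
 * before laying the sample out with the function above; the two walks
 * must visit the PERF_SAMPLE_* bits in the same order.
 */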
int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				    struct evlist *evlist, struct machine *machine)
{
	union perf_event *ev;
	struct evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->core.ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->core.id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct perf_thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}

int machine__synthesize_threads(struct machine *machine, struct target *target,
				struct perf_thread_map *threads, bool data_mmap,
				unsigned int nr_threads_synthesize)
{
	return __machine__synthesize_threads(machine, NULL, target, threads,
					     perf_event__process, data_mmap,
					     nr_threads_synthesize);
}
static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}
int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = strlen(evsel->unit);
	struct perf_record_event_update *ev;
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
					      perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	struct perf_record_event_update_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct perf_record_event_update_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->name, len + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = sizeof(struct perf_record_event_update);
	struct perf_record_event_update *ev;
	int max, err;
	u16 type;

	if (!evsel->core.own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id   = evsel->core.id[0];

	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
				 evsel->core.own_cpus, type, max);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
				 perf_event__handler_t process)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
						  evsel->core.id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}
static bool has_unit(struct evsel *evsel)
{
	return evsel->unit && *evsel->unit;
}

static bool has_scale(struct evsel *evsel)
{
	return evsel->scale != 1;
}
int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
				      perf_event__handler_t process, bool is_pipe)
{
	struct evsel *evsel;
	int err;

	/*
	 * Synthesize other events stuff not carried within
	 * attr event - unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel->supported)
			continue;

		/*
		 * Synthesize unit and scale only if it's defined.
		 */
		if (has_unit(evsel)) {
			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(evsel)) {
			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (evsel->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}

	return 0;
}
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
				u32 ids, u64 *id, perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size  = sizeof(struct perf_event_attr);
	size  = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);
	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}
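/*
 * Illustrative sizing, assuming a 128-byte struct perf_event_attr:
 * PERF_ALIGN leaves 128 unchanged, the perf_event_header adds 8 and two
 * sample ids add 16, giving header.size = 152.  The comparison against
 * the u16 header.size above catches truncation when ids is large enough
 * to push the total past 64KB, in which case -E2BIG is returned.
 */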
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
				    perf_event__handler_t process, struct machine *machine)
{
	union perf_event ev;
	size_t len;

	if (!pos->hit)
		return 0;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	return process(tool, &ev, NULL, machine);
}
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
	int err;

	if (attrs) {
		err = perf_event__synthesize_attrs(tool, evlist, process);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
	}

	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}
int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
				       struct perf_tool *tool __maybe_unused,
				       perf_event__handler_t process __maybe_unused,
				       struct machine *machine __maybe_unused)
{
	return 0;
}

extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
				    struct evlist *evlist, perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct perf_record_header_feature *fe;
	struct feat_fd ff;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = (void *)ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}