// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
#include "map.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
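/*
 * Note: proc_map_timeout (set below) is in milliseconds; it is scaled to
 * nanoseconds (x 1000000) in perf_event__synthesize_mmap_events() before
 * being compared against rdclock() deltas.
 */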
static const char *perf_event__names[] = {
    [0]                               = "TOTAL",
    [PERF_RECORD_MMAP]                = "MMAP",
    [PERF_RECORD_MMAP2]               = "MMAP2",
    [PERF_RECORD_LOST]                = "LOST",
    [PERF_RECORD_COMM]                = "COMM",
    [PERF_RECORD_EXIT]                = "EXIT",
    [PERF_RECORD_THROTTLE]            = "THROTTLE",
    [PERF_RECORD_UNTHROTTLE]          = "UNTHROTTLE",
    [PERF_RECORD_FORK]                = "FORK",
    [PERF_RECORD_READ]                = "READ",
    [PERF_RECORD_SAMPLE]              = "SAMPLE",
    [PERF_RECORD_AUX]                 = "AUX",
    [PERF_RECORD_ITRACE_START]        = "ITRACE_START",
    [PERF_RECORD_LOST_SAMPLES]        = "LOST_SAMPLES",
    [PERF_RECORD_SWITCH]              = "SWITCH",
    [PERF_RECORD_SWITCH_CPU_WIDE]     = "SWITCH_CPU_WIDE",
    [PERF_RECORD_NAMESPACES]          = "NAMESPACES",
    [PERF_RECORD_KSYMBOL]             = "KSYMBOL",
    [PERF_RECORD_BPF_EVENT]           = "BPF_EVENT",
    [PERF_RECORD_HEADER_ATTR]         = "ATTR",
    [PERF_RECORD_HEADER_EVENT_TYPE]   = "EVENT_TYPE",
    [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
    [PERF_RECORD_HEADER_BUILD_ID]     = "BUILD_ID",
    [PERF_RECORD_FINISHED_ROUND]      = "FINISHED_ROUND",
    [PERF_RECORD_ID_INDEX]            = "ID_INDEX",
    [PERF_RECORD_AUXTRACE_INFO]       = "AUXTRACE_INFO",
    [PERF_RECORD_AUXTRACE]            = "AUXTRACE",
    [PERF_RECORD_AUXTRACE_ERROR]      = "AUXTRACE_ERROR",
    [PERF_RECORD_THREAD_MAP]          = "THREAD_MAP",
    [PERF_RECORD_CPU_MAP]             = "CPU_MAP",
    [PERF_RECORD_STAT_CONFIG]         = "STAT_CONFIG",
    [PERF_RECORD_STAT]                = "STAT",
    [PERF_RECORD_STAT_ROUND]          = "STAT_ROUND",
    [PERF_RECORD_EVENT_UPDATE]        = "EVENT_UPDATE",
    [PERF_RECORD_TIME_CONV]           = "TIME_CONV",
    [PERF_RECORD_HEADER_FEATURE]      = "FEATURE",
};
static const char *perf_ns__names[] = {
    [NET_NS_INDEX]    = "net",
    [UTS_NS_INDEX]    = "uts",
    [IPC_NS_INDEX]    = "ipc",
    [PID_NS_INDEX]    = "pid",
    [USER_NS_INDEX]   = "user",
    [MNT_NS_INDEX]    = "mnt",
    [CGROUP_NS_INDEX] = "cgroup",
};

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
const char *perf_event__name(unsigned int id)
{
    if (id >= ARRAY_SIZE(perf_event__names))
        return "INVALID";
    if (!perf_event__names[id])
        return "UNKNOWN";
    return perf_event__names[id];
}
static const char *perf_ns__name(unsigned int id)
{
    if (id >= ARRAY_SIZE(perf_ns__names))
        return "UNKNOWN";
    return perf_ns__names[id];
}
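/*
 * Synthesized events are fabricated by perf itself (e.g. from /proc) rather
 * than coming from the kernel, so they carry no real sample. A dummy
 * perf_sample is built here with most fields set to -1 and only the cpumode
 * bits taken from the event header, so that tools can process synthesized
 * and kernel-generated events through the same handler.
 */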
int perf_tool__process_synth_event(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct machine *machine,
                                   perf_event__handler_t process)
{
    struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
        .stream_id = -1,
        .cpu       = -1,
        .period    = 1,
        .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
    };

    return process(tool, event, &synth_sample, machine);
}
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
                                    pid_t *tgid, pid_t *ppid)
{
    char filename[PATH_MAX];
    char bf[4096];
    int fd;
    size_t size = 0;
    ssize_t n;
    char *name, *tgids, *ppids;

    *tgid = -1;
    *ppid = -1;

    snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

    fd = open(filename, O_RDONLY);
    if (fd < 0) {
        pr_debug("couldn't open %s\n", filename);
        return -1;
    }

    n = read(fd, bf, sizeof(bf) - 1);
    close(fd);
    if (n <= 0) {
        pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
                   pid);
        return -1;
    }
    bf[n] = '\0';

    name = strstr(bf, "Name:");
    tgids = strstr(bf, "Tgid:");
    ppids = strstr(bf, "PPid:");

    if (name) {
        char *nl;

        name += 5;  /* strlen("Name:") */
        name = ltrim(name);

        nl = strchr(name, '\n');
        if (nl)
            *nl = '\0';

        size = strlen(name);
        if (size >= len)
            size = len - 1;
        memcpy(comm, name, size);
        comm[size] = '\0';
    } else {
        pr_debug("Name: string not found for pid %d\n", pid);
    }

    if (tgids) {
        tgids += 5;  /* strlen("Tgid:") */
        *tgid = atoi(tgids);
    } else {
        pr_debug("Tgid: string not found for pid %d\n", pid);
    }

    if (ppids) {
        ppids += 5;  /* strlen("PPid:") */
        *ppid = atoi(ppids);
    } else {
        pr_debug("PPid: string not found for pid %d\n", pid);
    }

    return 0;
}
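/*
 * Prepare a PERF_RECORD_COMM event for 'pid'. On the host the comm, tgid
 * and ppid are read from /proc; for guest machines only machine->pid is
 * known, so *ppid stays -1. The event size is trimmed to the actual comm
 * string (u64-aligned) plus the machine's id header.
 */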
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
                                    struct machine *machine,
                                    pid_t *tgid, pid_t *ppid)
{
    size_t size;

    *ppid = -1;

    memset(&event->comm, 0, sizeof(event->comm));

    if (machine__is_host(machine)) {
        if (perf_event__get_comm_ids(pid, event->comm.comm,
                                     sizeof(event->comm.comm),
                                     tgid, ppid) != 0) {
            return -1;
        }
    } else {
        *tgid = machine->pid;
    }

    if (*tgid < 0)
        return -1;

    event->comm.pid = *tgid;
    event->comm.header.type = PERF_RECORD_COMM;

    size = strlen(event->comm.comm) + 1;
    size = PERF_ALIGN(size, sizeof(u64));
    memset(event->comm.comm + size, 0, machine->id_hdr_size);
    event->comm.header.size = (sizeof(event->comm) -
                               (sizeof(event->comm.comm) - size) +
                               machine->id_hdr_size);
    event->comm.tid = pid;

    return 0;
}
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine)
{
    pid_t tgid, ppid;

    if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
        return -1;

    if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
        return -1;

    return tgid;
}
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
                                         struct perf_ns_link_info *ns_link_info)
{
    struct stat64 st;
    char proc_ns[128];

    sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
    if (stat64(proc_ns, &st) == 0) {
        ns_link_info->dev = st.st_dev;
        ns_link_info->ino = st.st_ino;
    }
}
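/*
 * Synthesize a PERF_RECORD_NAMESPACES event: one (dev, ino) pair per
 * namespace type, obtained by stat()ing the /proc/<pid>/ns/* links via
 * perf_event__get_ns_link_info(). Only emitted when the tool opted in
 * through tool->namespace_events.
 */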
int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
    u32 idx;
    struct perf_ns_link_info *ns_link_info;

    if (!tool || !tool->namespace_events)
        return 0;

    memset(&event->namespaces, 0, (sizeof(event->namespaces) +
           (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
           machine->id_hdr_size));

    event->namespaces.pid = tgid;
    event->namespaces.tid = pid;

    event->namespaces.nr_namespaces = NR_NAMESPACES;

    ns_link_info = event->namespaces.link_info;

    for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
        perf_event__get_ns_link_info(pid, perf_ns__name(idx),
                                     &ns_link_info[idx]);

    event->namespaces.header.type = PERF_RECORD_NAMESPACES;

    event->namespaces.header.size = (sizeof(event->namespaces) +
                                     (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                     machine->id_hdr_size);

    if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
        return -1;

    return 0;
}
static int perf_event__synthesize_fork(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid, pid_t ppid,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
    memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

    /*
     * For the main thread, set the parent to the ppid from the status
     * file. For other threads, set the parent pid to the main thread,
     * i.e. assume the main thread spawns all threads in a process.
     */
    if (tgid == pid) {
        event->fork.ppid = ppid;
        event->fork.ptid = ppid;
    } else {
        event->fork.ppid = tgid;
        event->fork.ptid = tgid;
    }
    event->fork.pid = tgid;
    event->fork.tid = pid;
    event->fork.header.type = PERF_RECORD_FORK;
    event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

    event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

    if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
        return -1;

    return 0;
}
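/*
 * Walk /proc/<pid>/task/<pid>/maps and synthesize one PERF_RECORD_MMAP2
 * event per mapping: start, length, protection/flags, device, inode and
 * filename are parsed from each line. Parsing is abandoned, and the last
 * event flagged with PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT, once it takes
 * longer than proc_map_timeout.
 */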
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data)
{
    char filename[PATH_MAX];
    FILE *fp;
    unsigned long long t;
    bool truncation = false;
    unsigned long long timeout = proc_map_timeout * 1000000ULL;
    int rc = 0;
    const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
    int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

    if (machine__is_default_guest(machine))
        return 0;

    snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
             machine->root_dir, pid, pid);

    fp = fopen(filename, "r");
    if (fp == NULL) {
        /*
         * We raced with a task exiting - just return:
         */
        pr_debug("couldn't open %s\n", filename);
        return -1;
    }

    event->header.type = PERF_RECORD_MMAP2;
    t = rdclock();

    while (1) {
        char bf[BUFSIZ];
        char prot[5];
        char execname[PATH_MAX];
        char anonstr[] = "//anon";
        unsigned int ino;
        size_t size;
        ssize_t n;

        if (fgets(bf, sizeof(bf), fp) == NULL)
            break;

        if ((rdclock() - t) > timeout) {
            pr_warning("Reading %s timed out. "
                       "You may want to increase "
                       "the time limit with --proc-map-timeout\n",
                       filename);
            truncation = true;
            goto out;
        }

        /* ensure null termination since stack will be reused. */
        strcpy(execname, "");

        /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
        n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
                   &event->mmap2.start, &event->mmap2.len, prot,
                   &event->mmap2.pgoff, &event->mmap2.maj,
                   &event->mmap2.min,
                   &ino, execname);

        /*
         * Anon maps don't have the execname.
         */
        if (n < 7)
            continue;

        event->mmap2.ino = (u64)ino;

        /*
         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
         */
        if (machine__is_host(machine))
            event->header.misc = PERF_RECORD_MISC_USER;
        else
            event->header.misc = PERF_RECORD_MISC_GUEST_USER;

        /* map protection and flags bits */
        event->mmap2.prot = 0;
        event->mmap2.flags = 0;
        if (prot[0] == 'r')
            event->mmap2.prot |= PROT_READ;
        if (prot[1] == 'w')
            event->mmap2.prot |= PROT_WRITE;
        if (prot[2] == 'x')
            event->mmap2.prot |= PROT_EXEC;

        if (prot[3] == 's')
            event->mmap2.flags |= MAP_SHARED;
        else
            event->mmap2.flags |= MAP_PRIVATE;

        if (prot[2] != 'x') {
            if (!mmap_data || prot[0] != 'r')
                continue;

            event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
        }

out:
        if (truncation)
            event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

        if (!strcmp(execname, ""))
            strcpy(execname, anonstr);

        if (hugetlbfs_mnt_len &&
            !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
            strcpy(execname, anonstr);
            event->mmap2.flags |= MAP_HUGETLB;
        }

        size = strlen(execname) + 1;
        memcpy(event->mmap2.filename, execname, size);
        size = PERF_ALIGN(size, sizeof(u64));
        /* the maps line holds [start, end); convert end into a length */
        event->mmap2.len -= event->mmap2.start;
        event->mmap2.header.size = (sizeof(event->mmap2) -
                                    (sizeof(event->mmap2.filename) - size));
        memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
        event->mmap2.header.size += machine->id_hdr_size;
        event->mmap2.pid = tgid;
        event->mmap2.tid = pid;

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
            rc = -1;
            break;
        }

        if (truncation)
            break;
    }

    fclose(fp);
    return rc;
}
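/*
 * Synthesize one PERF_RECORD_MMAP event per loaded kernel module, based on
 * the module maps the 'machine' already knows about.
 */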
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
    int rc = 0;
    struct map *pos;
    struct maps *maps = machine__kernel_maps(machine);
    union perf_event *event = zalloc((sizeof(event->mmap) +
                                      machine->id_hdr_size));
    if (event == NULL) {
        pr_debug("Not enough memory synthesizing mmap event "
                 "for kernel modules\n");
        return -1;
    }

    event->header.type = PERF_RECORD_MMAP;

    /*
     * kernel uses 0 for user space maps, see kernel/perf_event.c
     * __perf_event_mmap
     */
    if (machine__is_host(machine))
        event->header.misc = PERF_RECORD_MISC_KERNEL;
    else
        event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

    for (pos = maps__first(maps); pos; pos = map__next(pos)) {
        size_t size;

        if (!__map__is_kmodule(pos))
            continue;

        size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                                   (sizeof(event->mmap.filename) - size));
        memset(event->mmap.filename + size, 0, machine->id_hdr_size);
        event->mmap.header.size += machine->id_hdr_size;
        event->mmap.start = pos->start;
        event->mmap.len   = pos->end - pos->start;
        event->mmap.pid   = machine->pid;

        memcpy(event->mmap.filename, pos->dso->long_name,
               pos->dso->long_name_len + 1);
        if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
            rc = -1;
            break;
        }
    }

    free(event);
    return rc;
}
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      union perf_event *namespaces_event,
                                      pid_t pid, int full,
                                      perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine,
                                      bool mmap_data)
{
    char filename[PATH_MAX];
    DIR *tasks;
    struct dirent *dirent;
    pid_t tgid, ppid;
    int rc = 0;

    /* special case: only send one comm event using passed in pid */
    if (!full) {
        tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                           process, machine);

        if (tgid == -1)
            return -1;

        if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
                                              tgid, process, machine) < 0)
            return -1;

        /*
         * send mmap only for thread group leader
         * see thread__init_map_groups
         */
        if (pid == tgid &&
            perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                               process, machine, mmap_data))
            return -1;

        return 0;
    }

    if (machine__is_default_guest(machine))
        return 0;

    snprintf(filename, sizeof(filename), "%s/proc/%d/task",
             machine->root_dir, pid);

    tasks = opendir(filename);
    if (tasks == NULL) {
        pr_debug("couldn't open %s\n", filename);
        return 0;
    }

    while ((dirent = readdir(tasks)) != NULL) {
        char *end;
        pid_t _pid;

        _pid = strtol(dirent->d_name, &end, 10);
        if (*end)
            continue;

        rc = -1;
        if (perf_event__prepare_comm(comm_event, _pid, machine,
                                     &tgid, &ppid) != 0)
            break;

        if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                        ppid, process, machine) < 0)
            break;

        if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
                                              tgid, process, machine) < 0)
            break;

        /*
         * Send the prepared comm event
         */
        if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
            break;

        rc = 0;
        if (_pid == pid) {
            /* process the parent's maps too */
            rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                    process, machine, mmap_data);
            if (rc)
                break;
        }
    }

    closedir(tasks);
    return rc;
}
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data)
{
    union perf_event *comm_event, *mmap_event, *fork_event;
    union perf_event *namespaces_event;
    int err = -1, thread, j;

    comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
    if (comm_event == NULL)
        goto out;

    mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
    if (mmap_event == NULL)
        goto out_free_comm;

    fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
    if (fork_event == NULL)
        goto out_free_mmap;

    namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                              (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                              machine->id_hdr_size);
    if (namespaces_event == NULL)
        goto out_free_fork;

    err = 0;
    for (thread = 0; thread < threads->nr; ++thread) {
        if (__event__synthesize_thread(comm_event, mmap_event,
                                       fork_event, namespaces_event,
                                       thread_map__pid(threads, thread), 0,
                                       process, tool, machine,
                                       mmap_data)) {
            err = -1;
            break;
        }

        /*
         * comm.pid is set to thread group id by
         * perf_event__synthesize_comm
         */
        if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
            bool need_leader = true;

            /* is thread group leader in thread_map? */
            for (j = 0; j < threads->nr; ++j) {
                if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
                    need_leader = false;
                    break;
                }
            }

            /* if not, generate events for it */
            if (need_leader &&
                __event__synthesize_thread(comm_event, mmap_event,
                                           fork_event, namespaces_event,
                                           comm_event->comm.pid, 0,
                                           process, tool, machine,
                                           mmap_data)) {
                err = -1;
                break;
            }
        }
    }
    free(namespaces_event);
out_free_fork:
    free(fork_event);
out_free_mmap:
    free(mmap_event);
out_free_comm:
    free(comm_event);
out:
    return err;
}
static int __perf_event__synthesize_threads(struct perf_tool *tool,
                                            perf_event__handler_t process,
                                            struct machine *machine,
                                            bool mmap_data,
                                            struct dirent **dirent,
                                            int start,
                                            int num)
{
    union perf_event *comm_event, *mmap_event, *fork_event;
    union perf_event *namespaces_event;
    int err = -1;
    char *end;
    pid_t pid;
    int i;

    comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
    if (comm_event == NULL)
        goto out;

    mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
    if (mmap_event == NULL)
        goto out_free_comm;

    fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
    if (fork_event == NULL)
        goto out_free_mmap;

    namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                              (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                              machine->id_hdr_size);
    if (namespaces_event == NULL)
        goto out_free_fork;

    for (i = start; i < start + num; i++) {
        if (!isdigit(dirent[i]->d_name[0]))
            continue;

        pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
        /* only interested in proper numerical dirents */
        if (*end)
            continue;
        /*
         * We may race with exiting thread, so don't stop just because
         * one thread couldn't be synthesized.
         */
        __event__synthesize_thread(comm_event, mmap_event, fork_event,
                                   namespaces_event, pid, 1, process,
                                   tool, machine, mmap_data);
    }
    err = 0;

    free(namespaces_event);
out_free_fork:
    free(fork_event);
out_free_mmap:
    free(mmap_event);
out_free_comm:
    free(comm_event);
out:
    return err;
}
struct synthesize_threads_arg {
    struct perf_tool *tool;
    perf_event__handler_t process;
    struct machine *machine;
    bool mmap_data;
    struct dirent **dirent;
    int num;
    int start;
};
static void *synthesize_threads_worker(void *arg)
{
    struct synthesize_threads_arg *args = arg;

    __perf_event__synthesize_threads(args->tool, args->process,
                                     args->machine, args->mmap_data,
                                     args->dirent,
                                     args->start, args->num);
    return NULL;
}
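/*
 * Split the /proc dirents across the worker threads: the first n % thread_nr
 * workers get num_per_thread + 1 entries each, the rest get num_per_thread.
 * Illustrative example (not from the source): with n = 10 dirents and
 * thread_nr = 4, the workers get 3, 3, 2 and 2 entries, starting at offsets
 * 0, 3, 6 and 8.
 */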
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine,
                                   bool mmap_data,
                                   unsigned int nr_threads_synthesize)
{
    struct synthesize_threads_arg *args = NULL;
    pthread_t *synthesize_threads = NULL;
    char proc_path[PATH_MAX];
    struct dirent **dirent;
    int num_per_thread;
    int m, n, i, j;
    int thread_nr;
    int base = 0;
    int err = -1;

    if (machine__is_default_guest(machine))
        return 0;

    snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
    n = scandir(proc_path, &dirent, 0, alphasort);
    if (n < 0)
        return err;

    if (nr_threads_synthesize == UINT_MAX)
        thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
    else
        thread_nr = nr_threads_synthesize;

    if (thread_nr <= 1) {
        err = __perf_event__synthesize_threads(tool, process,
                                               machine, mmap_data,
                                               dirent, base, n);
        goto free_dirent;
    }
    if (thread_nr > n)
        thread_nr = n;

    synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
    if (synthesize_threads == NULL)
        goto free_dirent;

    args = calloc(sizeof(*args), thread_nr);
    if (args == NULL)
        goto free_threads;

    num_per_thread = n / thread_nr;
    m = n % thread_nr;
    for (i = 0; i < thread_nr; i++) {
        args[i].tool = tool;
        args[i].process = process;
        args[i].machine = machine;
        args[i].mmap_data = mmap_data;
        args[i].dirent = dirent;
    }
    for (i = 0; i < m; i++) {
        args[i].num = num_per_thread + 1;
        args[i].start = i * args[i].num;
    }
    if (i != 0)
        base = args[i-1].start + args[i-1].num;
    for (j = i; j < thread_nr; j++) {
        args[j].num = num_per_thread;
        args[j].start = base + (j - i) * args[i].num;
    }

    for (i = 0; i < thread_nr; i++) {
        if (pthread_create(&synthesize_threads[i], NULL,
                           synthesize_threads_worker, &args[i]))
            goto out_join;
    }
    err = 0;
out_join:
    for (i = 0; i < thread_nr; i++)
        pthread_join(synthesize_threads[i], NULL);
    free(args);
free_threads:
    free(synthesize_threads);
free_dirent:
    for (i = 0; i < n; i++)
        free(dirent[i]);
    free(dirent);

    return err;
}
struct process_symbol_args {
    const char *name;
    u64        start;
};
static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start)
{
    struct process_symbol_args *args = arg;

    /*
     * Must be a function or at least an alias, as in PARISC64, where "_text" is
     * an 'A' to the same address as "_stext".
     */
    if (!(kallsyms__is_function(type) ||
          type == 'A') || strcmp(name, args->name))
        return 0;

    args->start = start;
    return 1;
}
int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr)
{
    struct process_symbol_args args = { .name = symbol_name, };

    if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
        return -1;

    *addr = args.start;
    return 0;
}
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
                                              perf_event__handler_t process __maybe_unused,
                                              struct machine *machine __maybe_unused)
{
    return 0;
}
static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                                perf_event__handler_t process,
                                                struct machine *machine)
{
    size_t size;
    struct map *map = machine__kernel_map(machine);
    struct kmap *kmap;
    int err;
    union perf_event *event;

    if (symbol_conf.kptr_restrict)
        return -1;
    if (map == NULL)
        return -1;

    /*
     * We should get this from /sys/kernel/sections/.text, but until that
     * is available use this, and after it becomes available keep this as
     * a fallback for older kernels.
     */
    event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
    if (event == NULL) {
        pr_debug("Not enough memory synthesizing mmap event "
                 "for the kernel\n");
        return -1;
    }

    if (machine__is_host(machine)) {
        /*
         * kernel uses PERF_RECORD_MISC_USER for user space maps,
         * see kernel/perf_event.c __perf_event_mmap
         */
        event->header.misc = PERF_RECORD_MISC_KERNEL;
    } else {
        event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
    }

    kmap = map__kmap(map);
    size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                    "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
    size = PERF_ALIGN(size, sizeof(u64));
    event->mmap.header.type = PERF_RECORD_MMAP;
    event->mmap.header.size = (sizeof(event->mmap) -
                               (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
    event->mmap.pgoff = kmap->ref_reloc_sym->addr;
    event->mmap.start = map->start;
    event->mmap.len   = map->end - event->mmap.start;
    event->mmap.pid   = machine->pid;

    err = perf_tool__process_synth_event(tool, event, machine, process);
    free(event);

    return err;
}
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
    int err;

    err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
    if (err < 0)
        return err;

    return perf_event__synthesize_extra_kmaps(tool, process, machine);
}
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                       struct thread_map *threads,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
    union perf_event *event;
    int i, err, size;

    size  = sizeof(event->thread_map);
    size += threads->nr * sizeof(event->thread_map.entries[0]);

    event = zalloc(size);
    if (!event)
        return -ENOMEM;

    event->header.type = PERF_RECORD_THREAD_MAP;
    event->header.size = size;
    event->thread_map.nr = threads->nr;

    for (i = 0; i < threads->nr; i++) {
        struct thread_map_event_entry *entry = &event->thread_map.entries[i];
        char *comm = thread_map__comm(threads, i);

        if (!comm)
            comm = (char *) "";

        entry->pid = thread_map__pid(threads, i);
        strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
    }

    err = process(tool, event, NULL, machine);

    free(event);
    return err;
}
static void synthesize_cpus(struct cpu_map_entries *cpus,
                            struct cpu_map *map)
{
    int i;

    cpus->nr = map->nr;

    for (i = 0; i < map->nr; i++)
        cpus->cpu[i] = map->map[i];
}
static void synthesize_mask(struct cpu_map_mask *mask,
                            struct cpu_map *map, int max)
{
    int i;

    mask->nr = BITS_TO_LONGS(max);
    mask->long_size = sizeof(long);

    for (i = 0; i < map->nr; i++)
        set_bit(map->map[i], mask->mask);
}
static size_t cpus_size(struct cpu_map *map)
{
    return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}
static size_t mask_size(struct cpu_map *map, int *max)
{
    int i;

    *max = 0;

    for (i = 0; i < map->nr; i++) {
        /* bit position of the cpu is + 1 */
        int bit = map->map[i] + 1;

        if (bit > *max)
            *max = bit;
    }

    return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
    size_t size_cpus, size_mask;
    bool is_dummy = cpu_map__empty(map);

    /*
     * Both array and mask data have variable size based
     * on the number of cpus and their actual values.
     * The size of the 'struct cpu_map_data' is:
     *
     *   array = size of 'struct cpu_map_entries' +
     *           number of cpus * sizeof(u16)
     *
     *   mask  = size of 'struct cpu_map_mask' +
     *           maximum cpu bit converted to size of longs
     *
     * and finally + the size of 'struct cpu_map_data'.
     */
    size_cpus = cpus_size(map);
    size_mask = mask_size(map, max);

    if (is_dummy || (size_cpus < size_mask)) {
        *size += size_cpus;
        *type  = PERF_CPU_MAP__CPUS;
    } else {
        *size += size_mask;
        *type  = PERF_CPU_MAP__MASK;
    }

    *size += sizeof(struct cpu_map_data);
    *size = PERF_ALIGN(*size, sizeof(u64));
    return zalloc(*size);
}
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
                              u16 type, int max)
{
    data->type = type;

    switch (type) {
    case PERF_CPU_MAP__CPUS:
        synthesize_cpus((struct cpu_map_entries *) data->data, map);
        break;
    case PERF_CPU_MAP__MASK:
        synthesize_mask((struct cpu_map_mask *) data->data, map, max);
        /* fall through */
    default:
        break;
    }
}
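/*
 * Build a PERF_RECORD_CPU_MAP event: cpu_map_data__alloc() picks the
 * smaller of the two encodings (cpu array vs. bitmask) and returns a zeroed
 * buffer of the right size, which cpu_map_data__synthesize() then fills in.
 */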
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
    size_t size = sizeof(struct cpu_map_event);
    struct cpu_map_event *event;
    int max;
    u16 type;

    event = cpu_map_data__alloc(map, &size, &type, &max);
    if (!event)
        return NULL;

    event->header.type = PERF_RECORD_CPU_MAP;
    event->header.size = size;
    event->data.type   = type;

    cpu_map_data__synthesize(&event->data, map, type, max);
    return event;
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct cpu_map *map,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
    struct cpu_map_event *event;
    int err;

    event = cpu_map_event__new(map);
    if (!event)
        return -ENOMEM;

    err = process(tool, (union perf_event *) event, NULL, machine);

    free(event);
    return err;
}
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
    struct stat_config_event *event;
    int size, i = 0, err;

    size = sizeof(*event);
    size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

    event = zalloc(size);
    if (!event)
        return -ENOMEM;

    event->header.type = PERF_RECORD_STAT_CONFIG;
    event->header.size = size;
    event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)                                  \
    event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
    event->data[i].val = __val;                             \
    i++;

    ADD(AGGR_MODE, config->aggr_mode)
    ADD(INTERVAL,  config->interval)
    ADD(SCALE,     config->scale)

    WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
              "stat config terms unbalanced\n");
#undef ADD

    err = process(tool, (union perf_event *) event, NULL, machine);

    free(event);
    return err;
}
int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine)
{
    struct stat_event event;

    event.header.type = PERF_RECORD_STAT;
    event.header.size = sizeof(event);
    event.header.misc = 0;

    event.id     = id;
    event.cpu    = cpu;
    event.thread = thread;
    event.val    = count->val;
    event.ena    = count->ena;
    event.run    = count->run;

    return process(tool, (union perf_event *) &event, NULL, machine);
}
int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 evtime, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
    struct stat_round_event event;

    event.header.type = PERF_RECORD_STAT_ROUND;
    event.header.size = sizeof(event);
    event.header.misc = 0;

    event.time = evtime;
    event.type = type;

    return process(tool, (union perf_event *) &event, NULL, machine);
}
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct stat_config_event *event)
{
    unsigned i;

    for (i = 0; i < event->nr; i++) {

        switch (event->data[i].tag) {
#define CASE(__term, __val)                         \
        case PERF_STAT_CONFIG_TERM__##__term:       \
            config->__val = event->data[i].val;     \
            break;

        CASE(AGGR_MODE, aggr_mode)
        CASE(SCALE,     scale)
        CASE(INTERVAL,  interval)
#undef CASE
        default:
            pr_warning("unknown stat config term %" PRIu64 "\n",
                       event->data[i].tag);
        }
    }
}
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
    const char *s;

    if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
        s = " exec";
    else
        s = "";

    return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
    size_t ret = 0;
    struct perf_ns_link_info *ns_link_info;
    u32 nr_namespaces, idx;

    ns_link_info = event->namespaces.link_info;
    nr_namespaces = event->namespaces.nr_namespaces;

    ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
                   event->namespaces.pid,
                   event->namespaces.tid,
                   nr_namespaces);

    for (idx = 0; idx < nr_namespaces; idx++) {
        if (idx && (idx % 4 == 0))
            ret += fprintf(fp, "\n\t\t ");

        ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
                       perf_ns__name(idx), (u64)ns_link_info[idx].dev,
                       (u64)ns_link_info[idx].ino,
                       ((idx + 1) != nr_namespaces) ? ", " : "]\n");
    }

    return ret;
}
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
    return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
    return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
    return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
                            union perf_event *event,
                            struct perf_sample *sample __maybe_unused,
                            struct machine *machine)
{
    return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine)
{
    return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine)
{
    return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
    return machine__process_switch_event(machine, event);
}

int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
                                union perf_event *event,
                                struct perf_sample *sample __maybe_unused,
                                struct machine *machine)
{
    return machine__process_ksymbol(machine, event, sample);
}

int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event,
                                  struct perf_sample *sample __maybe_unused,
                                  struct machine *machine)
{
    return machine__process_bpf_event(machine, event, sample);
}
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
                   event->mmap.pid, event->mmap.tid, event->mmap.start,
                   event->mmap.len, event->mmap.pgoff,
                   (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
                   event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
                   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
                   event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                   event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                   event->mmap2.min, event->mmap2.ino,
                   event->mmap2.ino_generation,
                   (event->mmap2.prot & PROT_READ) ? 'r' : '-',
                   (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
                   (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
                   (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                   event->mmap2.filename);
}
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
    struct thread_map *threads = thread_map__new_event(&event->thread_map);
    size_t ret;

    ret = fprintf(fp, " nr: ");

    if (threads)
        ret += thread_map__fprintf(threads, fp);
    else
        ret += fprintf(fp, "failed to get threads from event\n");

    thread_map__put(threads);
    return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
    struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
    size_t ret;

    ret = fprintf(fp, ": ");

    if (cpus)
        ret += cpu_map__fprintf(cpus, fp);
    else
        ret += fprintf(fp, "failed to get cpumap from event\n");

    cpu_map__put(cpus);
    return ret;
}
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
    return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct machine *machine)
{
    return machine__process_mmap2_event(machine, event, sample);
}
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
    return fprintf(fp, "(%d:%d):(%d:%d)\n",
                   event->fork.pid, event->fork.tid,
                   event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
    return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
    return machine__process_exit_event(machine, event, sample);
}
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
                   event->aux.aux_offset, event->aux.aux_size,
                   event->aux.flags,
                   event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
                   event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
                   event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " pid: %u tid: %u\n",
                   event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
    bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
    const char *in_out = !out ? "IN         " :
        !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
                    "OUT        " : "OUT preempt";

    if (event->header.type == PERF_RECORD_SWITCH)
        return fprintf(fp, " %s\n", in_out);

    return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
                   in_out, out ? "next" : "prev",
                   event->context_switch.next_prev_pid,
                   event->context_switch.next_prev_tid);
}
static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
                   event->ksymbol_event.addr, event->ksymbol_event.len,
                   event->ksymbol_event.ksym_type,
                   event->ksymbol_event.flags, event->ksymbol_event.name);
}

size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
{
    return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
                   event->bpf_event.type, event->bpf_event.flags,
                   event->bpf_event.id);
}
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
    size_t ret = fprintf(fp, "PERF_RECORD_%s",
                         perf_event__name(event->header.type));

    switch (event->header.type) {
    case PERF_RECORD_COMM:
        ret += perf_event__fprintf_comm(event, fp);
        break;
    case PERF_RECORD_FORK:
    case PERF_RECORD_EXIT:
        ret += perf_event__fprintf_task(event, fp);
        break;
    case PERF_RECORD_MMAP:
        ret += perf_event__fprintf_mmap(event, fp);
        break;
    case PERF_RECORD_NAMESPACES:
        ret += perf_event__fprintf_namespaces(event, fp);
        break;
    case PERF_RECORD_MMAP2:
        ret += perf_event__fprintf_mmap2(event, fp);
        break;
    case PERF_RECORD_AUX:
        ret += perf_event__fprintf_aux(event, fp);
        break;
    case PERF_RECORD_ITRACE_START:
        ret += perf_event__fprintf_itrace_start(event, fp);
        break;
    case PERF_RECORD_SWITCH:
    case PERF_RECORD_SWITCH_CPU_WIDE:
        ret += perf_event__fprintf_switch(event, fp);
        break;
    case PERF_RECORD_LOST:
        ret += perf_event__fprintf_lost(event, fp);
        break;
    case PERF_RECORD_KSYMBOL:
        ret += perf_event__fprintf_ksymbol(event, fp);
        break;
    case PERF_RECORD_BPF_EVENT:
        ret += perf_event__fprintf_bpf_event(event, fp);
        break;
    default:
        ret += fprintf(fp, "\n");
    }

    return ret;
}
int perf_event__process(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine)
{
    return machine__process_event(machine, event, sample);
}
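/*
 * Resolve 'addr' to a map in the thread's map groups. al->level records
 * where the address lives: 'k' host kernel, '.' host user, 'g' guest
 * kernel, 'u' guest user, or 'H' for addresses that cannot be resolved
 * (treated as hypervisor).
 */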
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
                             struct addr_location *al)
{
    struct map_groups *mg = thread->mg;
    struct machine *machine = mg->machine;
    bool load_map = false;

    al->machine = machine;
    al->thread = thread;
    al->addr = addr;
    al->cpumode = cpumode;
    al->filtered = 0;

    if (machine == NULL) {
        al->map = NULL;
        return NULL;
    }

    if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
        al->level = 'k';
        mg = &machine->kmaps;
        load_map = true;
    } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
        al->level = '.';
    } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
        al->level = 'g';
        mg = &machine->kmaps;
        load_map = true;
    } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
        al->level = 'u';
    } else {
        al->level = 'H';
        al->map = NULL;

        if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
             cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
            !perf_guest)
            al->filtered |= (1 << HIST_FILTER__GUEST);
        if ((cpumode == PERF_RECORD_MISC_USER ||
             cpumode == PERF_RECORD_MISC_KERNEL) &&
            !perf_host)
            al->filtered |= (1 << HIST_FILTER__HOST);

        return NULL;
    }

    al->map = map_groups__find(mg, al->addr);
    if (al->map != NULL) {
        /*
         * Kernel maps might be changed when loading symbols so loading
         * must be done prior to using kernel maps.
         */
        if (load_map)
            map__load(al->map);
        al->addr = al->map->map_ip(al->map, al->addr);
    }

    return al->map;
}
/*
 * For branch stacks or branch samples, the sample cpumode might not be correct
 * because it applies only to the sample 'ip' and not necessarily to 'addr' or
 * branch stack addresses. If possible, use a fallback to deal with those cases.
 */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
                                struct addr_location *al)
{
    struct map *map = thread__find_map(thread, cpumode, addr, al);
    struct machine *machine = thread->mg->machine;
    u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

    if (map || addr_cpumode == cpumode)
        return map;

    return thread__find_map(thread, addr_cpumode, addr, al);
}
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
                                   u64 addr, struct addr_location *al)
{
    al->sym = NULL;
    if (thread__find_map(thread, cpumode, addr, al))
        al->sym = map__find_symbol(al->map, al->addr);
    return al->sym;
}

struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
                                      u64 addr, struct addr_location *al)
{
    al->sym = NULL;
    if (thread__find_map_fb(thread, cpumode, addr, al))
        al->sym = map__find_symbol(al->map, al->addr);
    return al->sym;
}
/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample)
{
    struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                    sample->tid);
    if (thread == NULL)
        return -1;

    dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
    thread__find_map(thread, sample->cpumode, sample->ip, al);
    dump_printf(" ...... dso: %s\n",
                al->map ? al->map->dso->long_name :
                al->level == 'H' ? "[hypervisor]" : "<not found>");

    if (thread__is_filtered(thread))
        al->filtered |= (1 << HIST_FILTER__THREAD);

    al->sym = NULL;
    al->cpu = sample->cpu;
    al->socket = -1;
    al->srcline = NULL;

    if (al->cpu >= 0) {
        struct perf_env *env = machine->env;

        if (env && env->cpu)
            al->socket = env->cpu[al->cpu].socket_id;
    }

    if (al->map) {
        struct dso *dso = al->map->dso;

        if (symbol_conf.dso_list &&
            (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                          dso->short_name) ||
                       (dso->short_name != dso->long_name &&
                        strlist__has_entry(symbol_conf.dso_list,
                                           dso->long_name))))) {
            al->filtered |= (1 << HIST_FILTER__DSO);
        }

        al->sym = map__find_symbol(al->map, al->addr);
    }

    if (symbol_conf.sym_list &&
        (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
                                         al->sym->name))) {
        al->filtered |= (1 << HIST_FILTER__SYMBOL);
    }

    return 0;
}
/*
 * The preprocess_sample method will return with reference counts for the
 * entries in the addr_location it fills in; when done using them (and
 * perhaps after getting extra ref counts to keep a pointer to one of those
 * entries) it must be paired with addr_location__put(), so that the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
    thread__zput(al->thread);
}
bool is_bts_event(struct perf_event_attr *attr)
{
    return attr->type == PERF_TYPE_HARDWARE &&
           (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
           attr->sample_period == 1;
}
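/*
 * sample->addr carries a meaningful, symbol-resolvable address only for a
 * few event types: the software page-fault events and BTS branch events
 * matched by is_bts_event() above.
 */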
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
    if (attr->type == PERF_TYPE_SOFTWARE &&
        (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
         attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
         attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
        return true;

    if (is_bts_event(attr))
        return true;

    return false;
}
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample)
{
    thread__find_map_fb(thread, sample->cpumode, sample->addr, al);

    al->cpu = sample->cpu;
    al->sym = NULL;

    if (al->map)
        al->sym = map__find_symbol(al->map, al->addr);
}