// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>
#include <stdlib.h>
#include <perf/cpumap.h>

#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
/*
 * This test will generate random numbers of calls to some getpid-style
 * syscalls (getsid, getppid, getpgid), then establish an mmap for a group
 * of events that are created to monitor these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each sample back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_thread_map *threads;
	struct perf_cpu_map *cpus;
	struct evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];
	struct mmap *md;
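
	/* Build a thread map that contains only the current (test) thread. */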
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}
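
	/* All online CPUs; only the first entry in the map is used below. */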
	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_free_threads;
	}
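
	/*
	 * Pin the test to the first CPU in the map so that all the samples
	 * end up in the single ring buffer that is read back later.
	 */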
	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}
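
	/* The evlist will hold one tracepoint evsel per monitored syscall. */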
	evlist = evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);
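
	/*
	 * Create a syscalls:sys_enter_<name> tracepoint evsel for each
	 * syscall, add it to the evlist and open it on the pinned CPU.
	 */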
	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("perf_evsel__new(%s)\n", name);
			goto out_delete_evlist;
		}
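
		/*
		 * Wake up on every event and include a sample ID so each
		 * sample can be mapped back to its evsel.
		 */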
		evsels[i]->core.attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		evlist__add(evlist, evsels[i]);

		if (evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}
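
		/* Expect between 1 and 127 calls to this syscall; none seen yet. */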
		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}
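
	/* Map the event ring buffers (128 pages each) to receive the samples. */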
	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}
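
	/*
	 * Make exactly the expected number of calls to each syscall; the
	 * return values are deliberately ignored.
	 */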
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}
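
	/* Read back everything that was recorded in the first ring buffer. */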
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}
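
		/* The sample ID tells us which evsel generated this sample. */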
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
	err = 0;
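
	/* Every evsel must have seen exactly as many samples as syscalls made. */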
	evlist__for_each_entry(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}
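
	/*
	 * Exit paths: each label below undoes progressively less of the
	 * setup above, depending on how far the test got.
	 */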
out_delete_evlist:
	evlist__delete(evlist);
	cpus = NULL;
	threads = NULL;
out_free_cpus:
	perf_cpu_map__put(cpus);
out_free_threads:
	perf_thread_map__put(threads);
	return err;
}