// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/string.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include <perf/cpumap.h>
#include <internal/cpumap.h>
#include "debug.h"
#include "stat.h"
#include "util/counts.h"
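
/*
 * Check that sys_enter_openat tracepoint counts are kept per CPU: pin the
 * current thread to each CPU in turn, issue a CPU-distinct number of
 * openat() calls there, then read every per-CPU count back and compare it
 * with the number of calls issued on that CPU.
 */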
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused,
					   int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}
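
	/* Build a CPU map covering all online CPUs. */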
	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);
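
	/* Create an evsel for the syscalls:sys_enter_openat tracepoint. */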
	evsel = evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf),
					       "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}
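
	/* Open the event on all mapped CPUs for this thread. */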
	if (evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
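
	/*
	 * Pin the thread to each CPU in turn and issue nr_openat_calls + cpu
	 * openat() calls there, so that each CPU ends up with a distinct,
	 * verifiable count.
	 */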
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Explicitly preallocate the counts: if we relied on the automatic
	 * allocation done on the first read, it would size them for a
	 * single CPU, since reading starts at cpu 0.
	 */
	if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;
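
	/* Read each per-CPU count and compare it with the calls issued there. */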
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}
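
	/* Unwind: error paths enter at the label matching what was set up. */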
	evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}