#include <api/fs/tracefs.h>
#include <api/fs/debugfs.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
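/*
 * Exercise the syscalls:sys_enter_openat tracepoint on every online CPU:
 * pin the thread to each CPU in turn, issue a known number of openat()
 * calls there, then read the per-CPU counters back and check the totals.
 */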
int test__openat_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);
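	/*
	 * Create an evsel for the syscalls:sys_enter_openat tracepoint; if
	 * that fails, a missing tracefs/debugfs mount is the usual suspect,
	 * so hint at it in the debug output below.
	 */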
	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (evsel == NULL) {
		if (tracefs_configured())
			pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
		else if (debugfs_configured())
			pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		else
			pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
		goto out_thread_map_delete;
	}
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
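	/*
	 * Bind the thread to each CPU in turn and issue nr_openat_calls + cpu
	 * openat() calls there, so every CPU ends up with a different,
	 * predictable count that can be verified independently below.
	 */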
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}
	/*
	 * Here we need to explicitly preallocate the counts, as auto
	 * allocation would size them for a single cpu only, since we
	 * start reading at cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;
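	/*
	 * Read the tracepoint count back for each CPU and compare it with
	 * the number of openat() calls issued while pinned to that CPU.
	 */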
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}
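	/*
	 * Unwind in reverse order of setup; the labels below fall through so
	 * that earlier setup failures still release whatever was acquired.
	 */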
	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}