/* tools/perf/tests/open-syscall-all-cpus.c (Linux 4.1.18) */
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
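
/*
 * Open a "syscalls:sys_enter_open" tracepoint event on every online CPU,
 * pin the test thread to each CPU in turn while issuing a known number of
 * open(2) calls, then verify that the per-CPU counts match what was issued
 * on each CPU.
 */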
int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}
	CPU_ZERO(&cpu_set);
	evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
	if (evsel == NULL) {
		if (tracefs_configured())
			pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
		else if (debugfs_configured())
			pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		else
			pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
		goto out_thread_map_delete;
	}
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
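
	/*
	 * Pin the thread to each CPU in turn and issue a CPU-dependent
	 * number of open(2) calls there (nr_open_calls + cpu), so every
	 * CPU ends up with a distinct expected count.
	 */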
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}

		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}

		CPU_CLR(cpus->map[cpu], &cpu_set);
	}
	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start at cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}
	err = 0;
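
	/*
	 * Read the counter back on each CPU and compare it with the number
	 * of open(2) calls issued while the thread was pinned to that CPU.
	 */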
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}
	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}