3 #include "thread_map.h"
* This test will generate a random number of calls to some syscalls
* (getsid, getppid, getpgrp), then establish an mmap for a group of
* events that are created to monitor those syscalls.
*
* It will receive the events via mmap and use the PERF_SAMPLE_ID
* generated sample.id field to map each sample back to its respective
* perf_evsel instance.
*
* Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made.
/*
 * NOTE(review): this chunk is a garbled/partial extraction — decimal
 * original-line numbers are fused into the text, statements are split
 * across physical lines, and several original lines (error branches,
 * goto labels, the cpu_set/name/err declarations and the final return)
 * are missing from view. Comments below describe only what the visible
 * fragments establish; code tokens are left untouched.
 *
 * Returns 0 on success, a negative value on failure — implied by the
 * goto-based cleanup paths visible below ("out_free_threads").
 */
18 int test__basic_mmap(void)
/* Current record pulled out of the mmap ring buffer. */
21 union perf_event
*event
;
/* Thread map restricted to this process (built from getpid() below). */
22 struct thread_map
*threads
;
24 struct perf_evlist
*evlist
;
/* Names of the tracepoints to monitor: sys_enter_<name> (see snprintf). */
26 const char *syscall_names
[] = { "getsid", "getppid", "getpgrp",
/* Matching function pointers, called directly to trigger the events.
 * The (void *) cast silences the getsid() prototype mismatch. */
28 pid_t (*syscalls
[])(void) = { (void *)getsid
, getppid
, getpgrp
,
/* ARRAY_SIZE on the actual array (not a decayed pointer) — valid here. */
30 #define nsyscalls ARRAY_SIZE(syscall_names)
/* Per-syscall counters: observed vs. expected sample counts. */
31 unsigned int nr_events
[nsyscalls
],
32 expected_nr_events
[nsyscalls
], i
, j
;
/* One evsel per monitored tracepoint; evsel also reused as iterator. */
33 struct perf_evsel
*evsels
[nsyscalls
], *evsel
;
/* Monitor only this process, on any CPU (UINT_MAX = no pid limit). */
35 threads
= thread_map__new(-1, getpid(), UINT_MAX
);
36 if (threads
== NULL
) {
37 pr_debug("thread_map__new\n");
/* All online CPUs (NULL cpu list). */
41 cpus
= cpu_map__new(NULL
);
43 pr_debug("cpu_map__new\n");
44 goto out_free_threads
;
/* Pin ourselves to the first CPU of the map so all samples land in
 * a single, known mmap ring (the reader below only reads ring 0). */
48 CPU_SET(cpus
->map
[0], &cpu_set
);
/* NOTE(review): sched_setaffinity appears twice in the visible text
 * (original lines 49 and 50, only the second checked) — possibly an
 * extraction artifact rather than intentional; confirm upstream. */
49 sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
);
50 if (sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
) < 0) {
51 pr_debug("sched_setaffinity() failed on CPU %d: %s ",
52 cpus
->map
[0], strerror(errno
));
56 evlist
= perf_evlist__new();
58 pr_debug("perf_evlist__new\n");
/* Attach the cpu/thread maps to the event list. */
62 perf_evlist__set_maps(evlist
, cpus
, threads
);
/* Create, configure, add and open one tracepoint evsel per syscall. */
64 for (i
= 0; i
< nsyscalls
; ++i
) {
67 snprintf(name
, sizeof(name
), "sys_enter_%s", syscall_names
[i
]);
/* Third argument i becomes the evsel index used by nr_events[] below. */
68 evsels
[i
] = perf_evsel__newtp("syscalls", name
, i
);
69 if (evsels
[i
] == NULL
) {
70 pr_debug("perf_evsel__new\n");
/* Wake up the reader on every single event. */
74 evsels
[i
]->attr
.wakeup_events
= 1;
/* Request PERF_SAMPLE_ID so samples can be mapped back to the evsel. */
75 perf_evsel__set_sample_id(evsels
[i
], false);
77 perf_evlist__add(evlist
, evsels
[i
]);
79 if (perf_evsel__open(evsels
[i
], cpus
, threads
) < 0) {
80 pr_debug("failed to open counter: %s, "
81 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
/* Random per-syscall call count in [1, 127]. */
87 expected_nr_events
[i
] = 1 + rand() % 127;
/* 128-page ring buffer, overwrite mode per the 'true' flag. */
90 if (perf_evlist__mmap(evlist
, 128, true) < 0) {
91 pr_debug("failed to mmap events: %d (%s)\n", errno
,
/* Generate the events: call each syscall its expected number of times. */
96 for (i
= 0; i
< nsyscalls
; ++i
)
97 for (j
= 0; j
< expected_nr_events
[i
]; ++j
) {
98 int foo
= syscalls
[i
]();
/* Drain ring 0 (we are pinned to one CPU, so all samples are here). */
102 while ((event
= perf_evlist__mmap_read(evlist
, 0)) != NULL
) {
103 struct perf_sample sample
;
/* Only PERF_RECORD_SAMPLE records are expected in this test. */
105 if (event
->header
.type
!= PERF_RECORD_SAMPLE
) {
106 pr_debug("unexpected %s event\n",
107 perf_event__name(event
->header
.type
));
111 err
= perf_evlist__parse_sample(evlist
, event
, &sample
);
113 pr_err("Can't parse sample, err = %d\n", err
);
/* Map sample.id (from PERF_SAMPLE_ID) back to its evsel. */
118 evsel
= perf_evlist__id2evsel(evlist
, sample
.id
);
120 pr_debug("event with id %" PRIu64
121 " doesn't map to an evsel\n", sample
.id
);
/* evsel->idx is the i passed to perf_evsel__newtp above. */
124 nr_events
[evsel
->idx
]++;
125 perf_evlist__mmap_consume(evlist
, 0);
/* Verify: every evsel saw exactly as many samples as syscalls made. */
129 list_for_each_entry(evsel
, &evlist
->entries
, node
) {
130 if (nr_events
[evsel
->idx
] != expected_nr_events
[evsel
->idx
]) {
131 pr_debug("expected %d %s events, got %d\n",
132 expected_nr_events
[evsel
->idx
],
133 perf_evsel__name(evsel
), nr_events
[evsel
->idx
]);
/* Cleanup: unmap, close per-evsel fds, then free list and maps. */
140 perf_evlist__munmap(evlist
);
142 for (i
= 0; i
< nsyscalls
; ++i
)
/* 1 = single cpu (we only opened on the pinned CPU's map). */
143 perf_evsel__close_fd(evsels
[i
], 1, threads
->nr
);
145 perf_evlist__delete(evlist
);
147 cpu_map__delete(cpus
);
149 thread_map__delete(threads
);