// SPDX-License-Identifier: GPL-2.0
/*
 * Ring-buffer memory mapping tests
 *
 * Copyright (c) 2024 Vincent Donnefort <vdonnefort@google.com>
 */
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <linux/trace_mmap.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include "../user_events/user_events_selftests.h" /* share tracefs setup */
#include "../kselftest_harness.h"
22 #define TRACEFS_ROOT "/sys/kernel/tracing"
/*
 * Overwrite the file at @path with @value (truncating any prior content).
 *
 * Returns 0 on success, -errno on open or write failure.
 */
static int __tracefs_write(const char *path, const char *value)
{
	int fd, ret;

	fd = open(path, O_WRONLY | O_TRUNC);
	if (fd < 0)
		return -errno;

	ret = write(fd, value, strlen(value));

	close(fd);

	return ret == -1 ? -errno : 0;
}
/*
 * Format @value as decimal and write it to the file at @path.
 *
 * Returns 0 on success, a negative value on formatting or write failure.
 */
static int __tracefs_write_int(const char *path, int value)
{
	char *str;
	int ret;

	if (asprintf(&str, "%d", value) < 0)
		return -1;

	ret = __tracefs_write(path, str);

	free(str);	/* asprintf allocated; caller of asprintf frees */

	return ret;
}
/* Harness wrappers: assert that the tracefs write succeeded. */
#define tracefs_write_int(path, value) \
	ASSERT_EQ(__tracefs_write_int((path), (value)), 0)

#define tracefs_write(path, value) \
	ASSERT_EQ(__tracefs_write((path), (value)), 0)
60 static int tracefs_reset(void)
62 if (__tracefs_write_int(TRACEFS_ROOT
"/tracing_on", 0))
64 if (__tracefs_write(TRACEFS_ROOT
"/trace", ""))
66 if (__tracefs_write(TRACEFS_ROOT
"/set_event", ""))
68 if (__tracefs_write(TRACEFS_ROOT
"/current_tracer", "nop"))
/*
 * User-space view of one per-CPU ring-buffer mapping: the mmap'd
 * meta-page and the trace_pipe_raw fd backing the mapping.
 */
struct tracefs_cpu_map_desc {
	struct trace_buffer_meta	*meta;
	int				cpu_fd;
};
79 int tracefs_cpu_map(struct tracefs_cpu_map_desc
*desc
, int cpu
)
81 int page_size
= getpagesize();
85 if (asprintf(&cpu_path
,
86 TRACEFS_ROOT
"/per_cpu/cpu%d/trace_pipe_raw",
90 desc
->cpu_fd
= open(cpu_path
, O_RDONLY
| O_NONBLOCK
);
96 map
= mmap(NULL
, page_size
, PROT_READ
, MAP_SHARED
, desc
->cpu_fd
, 0);
97 if (map
== MAP_FAILED
)
100 desc
->meta
= (struct trace_buffer_meta
*)map
;
102 /* the meta-page is bigger than the original mapping */
103 if (page_size
< desc
->meta
->meta_struct_len
) {
104 int meta_page_size
= desc
->meta
->meta_page_size
;
106 munmap(desc
->meta
, page_size
);
107 page_size
= meta_page_size
;
114 void tracefs_cpu_unmap(struct tracefs_cpu_map_desc
*desc
)
116 munmap(desc
->meta
, desc
->meta
->meta_page_size
);
121 struct tracefs_cpu_map_desc map_desc
;
/* Variant knob: ring-buffer sub-buffer size in KiB. */
FIXTURE_VARIANT(map) {
	int	subbuf_size;
};
129 FIXTURE_VARIANT_ADD(map
, subbuf_size_4k
) {
133 FIXTURE_VARIANT_ADD(map
, subbuf_size_8k
) {
139 int cpu
= sched_getcpu();
145 SKIP(return, "Skipping: %s", "Please run the test as root");
147 if (!tracefs_enabled(&message
, &fail
, &umount
)) {
149 TH_LOG("Tracefs setup failed: %s", message
);
152 SKIP(return, "Skipping: %s", message
);
155 self
->umount
= umount
;
159 ASSERT_EQ(tracefs_reset(), 0);
161 tracefs_write_int(TRACEFS_ROOT
"/buffer_subbuf_size_kb", variant
->subbuf_size
);
163 ASSERT_EQ(tracefs_cpu_map(&self
->map_desc
, cpu
), 0);
166 * Ensure generated events will be found on this very same ring-buffer.
169 CPU_SET(cpu
, &cpu_mask
);
170 ASSERT_EQ(sched_setaffinity(0, sizeof(cpu_mask
), &cpu_mask
), 0);
173 FIXTURE_TEARDOWN(map
)
180 tracefs_cpu_unmap(&self
->map_desc
);
183 TEST_F(map
, meta_page_check
)
185 struct tracefs_cpu_map_desc
*desc
= &self
->map_desc
;
188 ASSERT_EQ(desc
->meta
->entries
, 0);
189 ASSERT_EQ(desc
->meta
->overrun
, 0);
190 ASSERT_EQ(desc
->meta
->read
, 0);
192 ASSERT_EQ(desc
->meta
->reader
.id
, 0);
193 ASSERT_EQ(desc
->meta
->reader
.read
, 0);
195 ASSERT_EQ(ioctl(desc
->cpu_fd
, TRACE_MMAP_IOCTL_GET_READER
), 0);
196 ASSERT_EQ(desc
->meta
->reader
.id
, 0);
198 tracefs_write_int(TRACEFS_ROOT
"/tracing_on", 1);
199 for (int i
= 0; i
< 16; i
++)
200 tracefs_write_int(TRACEFS_ROOT
"/trace_marker", i
);
202 ASSERT_EQ(ioctl(desc
->cpu_fd
, TRACE_MMAP_IOCTL_GET_READER
), 0);
204 ASSERT_EQ(desc
->meta
->entries
, 16);
205 ASSERT_EQ(desc
->meta
->overrun
, 0);
206 ASSERT_EQ(desc
->meta
->read
, 16);
208 ASSERT_EQ(desc
->meta
->reader
.id
, 1);
214 TEST_F(map
, data_mmap
)
216 struct tracefs_cpu_map_desc
*desc
= &self
->map_desc
;
217 unsigned long meta_len
, data_len
;
220 meta_len
= desc
->meta
->meta_page_size
;
221 data_len
= desc
->meta
->subbuf_size
* desc
->meta
->nr_subbufs
;
223 /* Map all the available subbufs */
224 data
= mmap(NULL
, data_len
, PROT_READ
, MAP_SHARED
,
225 desc
->cpu_fd
, meta_len
);
226 ASSERT_NE(data
, MAP_FAILED
);
227 munmap(data
, data_len
);
229 /* Map all the available subbufs - 1 */
230 data_len
-= desc
->meta
->subbuf_size
;
231 data
= mmap(NULL
, data_len
, PROT_READ
, MAP_SHARED
,
232 desc
->cpu_fd
, meta_len
);
233 ASSERT_NE(data
, MAP_FAILED
);
234 munmap(data
, data_len
);
236 /* Overflow the available subbufs by 1 */
237 meta_len
+= desc
->meta
->subbuf_size
* 2;
238 data
= mmap(NULL
, data_len
, PROT_READ
, MAP_SHARED
,
239 desc
->cpu_fd
, meta_len
);
240 ASSERT_EQ(data
, MAP_FAILED
);
242 /* Verify meta-page padding */
243 if (desc
->meta
->meta_page_size
> getpagesize()) {
244 data_len
= desc
->meta
->meta_page_size
;
245 data
= mmap(NULL
, data_len
,
246 PROT_READ
, MAP_SHARED
, desc
->cpu_fd
, 0);
247 ASSERT_NE(data
, MAP_FAILED
);
249 for (int i
= desc
->meta
->meta_struct_len
;
250 i
< desc
->meta
->meta_page_size
; i
+= sizeof(int))
251 ASSERT_EQ(*(int *)(data
+ i
), 0);
253 munmap(data
, data_len
);
261 FIXTURE_SETUP(snapshot
)
268 SKIP(return, "Skipping: %s", "Please run the test as root");
270 if (stat(TRACEFS_ROOT
"/snapshot", &sb
))
271 SKIP(return, "Skipping: %s", "snapshot not available");
273 if (!tracefs_enabled(&message
, &fail
, &umount
)) {
275 TH_LOG("Tracefs setup failed: %s", message
);
278 SKIP(return, "Skipping: %s", message
);
281 self
->umount
= umount
;
284 FIXTURE_TEARDOWN(snapshot
)
286 __tracefs_write(TRACEFS_ROOT
"/events/sched/sched_switch/trigger",
294 TEST_F(snapshot
, excludes_map
)
296 struct tracefs_cpu_map_desc map_desc
;
297 int cpu
= sched_getcpu();
300 tracefs_write(TRACEFS_ROOT
"/events/sched/sched_switch/trigger",
302 ASSERT_EQ(tracefs_cpu_map(&map_desc
, cpu
), -EBUSY
);
305 TEST_F(snapshot
, excluded_by_map
)
307 struct tracefs_cpu_map_desc map_desc
;
308 int cpu
= sched_getcpu();
310 ASSERT_EQ(tracefs_cpu_map(&map_desc
, cpu
), 0);
312 ASSERT_EQ(__tracefs_write(TRACEFS_ROOT
"/events/sched/sched_switch/trigger",
313 "snapshot"), -EBUSY
);
314 ASSERT_EQ(__tracefs_write(TRACEFS_ROOT
"/snapshot",