/* tools/testing/selftests/bpf/prog_tests/mmap.c */
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "test_mmap.skel.h"
/* Host-side mirror of the BPF-side data_map value layout: 2048 u64
 * slots (16KB), large enough to span multiple pages for the partial
 * mmap()/munmap() tests below.
 */
struct map_data {
	__u64 val[512 * 4];
};
/* Round sz up to the next multiple of the system page size.
 * Used so mmap() lengths always cover whole pages.
 */
static size_t roundup_page(size_t sz)
{
	long page_size = sysconf(_SC_PAGE_SIZE);

	return (sz + page_size - 1) / page_size * page_size;
}
16 void test_mmap(void)
18 const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
19 const size_t map_sz = roundup_page(sizeof(struct map_data));
20 const int zero = 0, one = 1, two = 2, far = 1500;
21 const long page_size = sysconf(_SC_PAGE_SIZE);
22 int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
23 struct bpf_map *data_map, *bss_map;
24 void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
25 struct test_mmap__bss *bss_data;
26 struct bpf_map_info map_info;
27 __u32 map_info_sz = sizeof(map_info);
28 struct map_data *map_data;
29 struct test_mmap *skel;
30 __u64 val = 0;
32 skel = test_mmap__open_and_load();
33 if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
34 return;
36 bss_map = skel->maps.bss;
37 data_map = skel->maps.data_map;
38 data_map_fd = bpf_map__fd(data_map);
40 rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
41 tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
42 if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
43 munmap(tmp1, 4096);
44 goto cleanup;
46 /* now double-check if it's mmap()'able at all */
47 tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
48 if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
49 goto cleanup;
51 /* get map's ID */
52 memset(&map_info, 0, map_info_sz);
53 err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
54 if (CHECK(err, "map_get_info", "failed %d\n", errno))
55 goto cleanup;
56 data_map_id = map_info.id;
58 /* mmap BSS map */
59 bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
60 bpf_map__fd(bss_map), 0);
61 if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
62 ".bss mmap failed: %d\n", errno)) {
63 bss_mmaped = NULL;
64 goto cleanup;
66 /* map as R/W first */
67 map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
68 data_map_fd, 0);
69 if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
70 "data_map mmap failed: %d\n", errno)) {
71 map_mmaped = NULL;
72 goto cleanup;
75 bss_data = bss_mmaped;
76 map_data = map_mmaped;
78 CHECK_FAIL(bss_data->in_val);
79 CHECK_FAIL(bss_data->out_val);
80 CHECK_FAIL(skel->bss->in_val);
81 CHECK_FAIL(skel->bss->out_val);
82 CHECK_FAIL(map_data->val[0]);
83 CHECK_FAIL(map_data->val[1]);
84 CHECK_FAIL(map_data->val[2]);
85 CHECK_FAIL(map_data->val[far]);
87 err = test_mmap__attach(skel);
88 if (CHECK(err, "attach_raw_tp", "err %d\n", err))
89 goto cleanup;
91 bss_data->in_val = 123;
92 val = 111;
93 CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
95 usleep(1);
97 CHECK_FAIL(bss_data->in_val != 123);
98 CHECK_FAIL(bss_data->out_val != 123);
99 CHECK_FAIL(skel->bss->in_val != 123);
100 CHECK_FAIL(skel->bss->out_val != 123);
101 CHECK_FAIL(map_data->val[0] != 111);
102 CHECK_FAIL(map_data->val[1] != 222);
103 CHECK_FAIL(map_data->val[2] != 123);
104 CHECK_FAIL(map_data->val[far] != 3 * 123);
106 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
107 CHECK_FAIL(val != 111);
108 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
109 CHECK_FAIL(val != 222);
110 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
111 CHECK_FAIL(val != 123);
112 CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
113 CHECK_FAIL(val != 3 * 123);
115 /* data_map freeze should fail due to R/W mmap() */
116 err = bpf_map_freeze(data_map_fd);
117 if (CHECK(!err || errno != EBUSY, "no_freeze",
118 "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
119 goto cleanup;
121 err = mprotect(map_mmaped, map_sz, PROT_READ);
122 if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
123 goto cleanup;
125 /* unmap R/W mapping */
126 err = munmap(map_mmaped, map_sz);
127 map_mmaped = NULL;
128 if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
129 goto cleanup;
131 /* re-map as R/O now */
132 map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
133 if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
134 "data_map R/O mmap failed: %d\n", errno)) {
135 map_mmaped = NULL;
136 goto cleanup;
138 err = mprotect(map_mmaped, map_sz, PROT_WRITE);
139 if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
140 goto cleanup;
141 err = mprotect(map_mmaped, map_sz, PROT_EXEC);
142 if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
143 goto cleanup;
144 map_data = map_mmaped;
146 /* map/unmap in a loop to test ref counting */
147 for (i = 0; i < 10; i++) {
148 int flags = i % 2 ? PROT_READ : PROT_WRITE;
149 void *p;
151 p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
152 if (CHECK_FAIL(p == MAP_FAILED))
153 goto cleanup;
154 err = munmap(p, map_sz);
155 if (CHECK_FAIL(err))
156 goto cleanup;
159 /* data_map freeze should now succeed due to no R/W mapping */
160 err = bpf_map_freeze(data_map_fd);
161 if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
162 err, errno))
163 goto cleanup;
165 /* mapping as R/W now should fail */
166 tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
167 data_map_fd, 0);
168 if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
169 munmap(tmp1, map_sz);
170 goto cleanup;
173 bss_data->in_val = 321;
174 usleep(1);
175 CHECK_FAIL(bss_data->in_val != 321);
176 CHECK_FAIL(bss_data->out_val != 321);
177 CHECK_FAIL(skel->bss->in_val != 321);
178 CHECK_FAIL(skel->bss->out_val != 321);
179 CHECK_FAIL(map_data->val[0] != 111);
180 CHECK_FAIL(map_data->val[1] != 222);
181 CHECK_FAIL(map_data->val[2] != 321);
182 CHECK_FAIL(map_data->val[far] != 3 * 321);
184 /* check some more advanced mmap() manipulations */
186 tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
187 -1, 0);
188 if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
189 goto cleanup;
191 /* map all but last page: pages 1-3 mapped */
192 tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
193 data_map_fd, 0);
194 if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
195 munmap(tmp0, 4 * page_size);
196 goto cleanup;
199 /* unmap second page: pages 1, 3 mapped */
200 err = munmap(tmp1 + page_size, page_size);
201 if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
202 munmap(tmp1, 4 * page_size);
203 goto cleanup;
206 /* map page 2 back */
207 tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
208 MAP_SHARED | MAP_FIXED, data_map_fd, 0);
209 if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
210 munmap(tmp1, page_size);
211 munmap(tmp1 + 2*page_size, 2 * page_size);
212 goto cleanup;
214 CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
215 "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
217 /* re-map all 4 pages */
218 tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
219 data_map_fd, 0);
220 if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
221 munmap(tmp1, 4 * page_size); /* unmap page 1 */
222 goto cleanup;
224 CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
226 map_data = tmp2;
227 CHECK_FAIL(bss_data->in_val != 321);
228 CHECK_FAIL(bss_data->out_val != 321);
229 CHECK_FAIL(skel->bss->in_val != 321);
230 CHECK_FAIL(skel->bss->out_val != 321);
231 CHECK_FAIL(map_data->val[0] != 111);
232 CHECK_FAIL(map_data->val[1] != 222);
233 CHECK_FAIL(map_data->val[2] != 321);
234 CHECK_FAIL(map_data->val[far] != 3 * 321);
236 munmap(tmp2, 4 * page_size);
238 /* map all 4 pages, but with pg_off=1 page, should fail */
239 tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
240 data_map_fd, page_size /* initial page shift */);
241 if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
242 munmap(tmp1, 4 * page_size);
243 goto cleanup;
246 tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
247 if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
248 goto cleanup;
250 test_mmap__destroy(skel);
251 skel = NULL;
252 CHECK_FAIL(munmap(bss_mmaped, bss_sz));
253 bss_mmaped = NULL;
254 CHECK_FAIL(munmap(map_mmaped, map_sz));
255 map_mmaped = NULL;
257 /* map should be still held by active mmap */
258 tmp_fd = bpf_map_get_fd_by_id(data_map_id);
259 if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
260 munmap(tmp1, map_sz);
261 goto cleanup;
263 close(tmp_fd);
265 /* this should release data map finally */
266 munmap(tmp1, map_sz);
268 /* we need to wait for RCU grace period */
269 for (i = 0; i < 10000; i++) {
270 __u32 id = data_map_id - 1;
271 if (bpf_map_get_next_id(id, &id) || id > data_map_id)
272 break;
273 usleep(1);
276 /* should fail to get map FD by non-existing ID */
277 tmp_fd = bpf_map_get_fd_by_id(data_map_id);
278 if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
279 "unexpectedly succeeded %d\n", tmp_fd)) {
280 close(tmp_fd);
281 goto cleanup;
284 cleanup:
285 if (bss_mmaped)
286 CHECK_FAIL(munmap(bss_mmaped, bss_sz));
287 if (map_mmaped)
288 CHECK_FAIL(munmap(map_mmaped, map_sz));
289 test_mmap__destroy(skel);