1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 Facebook */
3 #include <linux/compiler.h>
6 #include <sys/resource.h>
7 #include <sys/socket.h>
17 #include <bpf/libbpf.h>
20 #include <test_maps.h>
22 static struct bpf_create_map_attr xattr
= {
23 .name
= "sk_storage_map",
24 .map_type
= BPF_MAP_TYPE_SK_STORAGE
,
25 .map_flags
= BPF_F_NO_PREALLOC
,
30 .btf_value_type_id
= 3,
/* Shared state between the stress-test controller and worker threads. */
static unsigned int nr_sk_threads_done;	/* threads that finished one round */
static unsigned int nr_sk_threads_err;	/* threads that hit an error */
static unsigned int nr_sk_per_thread = 4096;	/* sockets per worker thread */
static unsigned int nr_sk_threads = 4;	/* number of worker threads */
static int sk_storage_map = -1;		/* fd of the map under test, -1 = closed */
static unsigned int stop;		/* set to 1 to tell all threads to quit */
static int runtime_s = 5;		/* stress-test wall-clock budget (seconds) */
42 static bool is_stopped(void)
44 return READ_ONCE(stop
);
47 static unsigned int threads_err(void)
49 return READ_ONCE(nr_sk_threads_err
);
52 static void notify_thread_err(void)
54 __sync_add_and_fetch(&nr_sk_threads_err
, 1);
/*
 * Block until either a worker reports an error or the test is stopped.
 * Returns true only when the test was NOT externally stopped.
 */
static bool wait_for_threads_err(void)
{
	while (!is_stopped() && !threads_err())
		usleep(500);	/* poll; cheap relative to the stress load */

	return !is_stopped();
}
65 static unsigned int threads_done(void)
67 return READ_ONCE(nr_sk_threads_done
);
70 static void notify_thread_done(void)
72 __sync_add_and_fetch(&nr_sk_threads_done
, 1);
75 static void notify_thread_redo(void)
77 __sync_sub_and_fetch(&nr_sk_threads_done
, 1);
80 static bool wait_for_threads_done(void)
82 while (threads_done() != nr_sk_threads
&& !is_stopped() &&
86 return !is_stopped() && !threads_err();
89 static bool wait_for_threads_redo(void)
91 while (threads_done() && !is_stopped() && !threads_err())
94 return !is_stopped() && !threads_err();
97 static bool wait_for_map(void)
99 while (READ_ONCE(sk_storage_map
) == -1 && !is_stopped())
102 return !is_stopped();
105 static bool wait_for_map_close(void)
107 while (READ_ONCE(sk_storage_map
) != -1 && !is_stopped())
110 return !is_stopped();
113 static int load_btf(void)
115 const char btf_str_sec
[] = "\0bpf_spin_lock\0val\0cnt\0l";
116 __u32 btf_raw_types
[] = {
118 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED
, 0, 32, 4), /* [1] */
119 /* struct bpf_spin_lock */ /* [2] */
120 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT
, 0, 1), 4),
121 BTF_MEMBER_ENC(15, 1, 0), /* int val; */
122 /* struct val */ /* [3] */
123 BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT
, 0, 2), 8),
124 BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
125 BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
127 struct btf_header btf_hdr
= {
129 .version
= BTF_VERSION
,
130 .hdr_len
= sizeof(struct btf_header
),
131 .type_len
= sizeof(btf_raw_types
),
132 .str_off
= sizeof(btf_raw_types
),
133 .str_len
= sizeof(btf_str_sec
),
135 __u8 raw_btf
[sizeof(struct btf_header
) + sizeof(btf_raw_types
) +
136 sizeof(btf_str_sec
)];
138 memcpy(raw_btf
, &btf_hdr
, sizeof(btf_hdr
));
139 memcpy(raw_btf
+ sizeof(btf_hdr
), btf_raw_types
, sizeof(btf_raw_types
));
140 memcpy(raw_btf
+ sizeof(btf_hdr
) + sizeof(btf_raw_types
),
141 btf_str_sec
, sizeof(btf_str_sec
));
143 return bpf_load_btf(raw_btf
, sizeof(raw_btf
), 0, 0, 0);
146 static int create_sk_storage_map(void)
151 CHECK(btf_fd
== -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
153 xattr
.btf_fd
= btf_fd
;
155 map_fd
= bpf_create_map_xattr(&xattr
);
159 "bpf_create_map_xattr()", "errno:%d\n", errno
);
164 static void *insert_close_thread(void *arg
)
169 } value
= { .cnt
= 0xeB9F, .lock
= 0, };
170 int i
, map_fd
, err
, *sk_fds
;
172 sk_fds
= malloc(sizeof(*sk_fds
) * nr_sk_per_thread
);
175 return ERR_PTR(-ENOMEM
);
178 for (i
= 0; i
< nr_sk_per_thread
; i
++)
181 while (!is_stopped()) {
185 map_fd
= READ_ONCE(sk_storage_map
);
186 for (i
= 0; i
< nr_sk_per_thread
&& !is_stopped(); i
++) {
187 sk_fds
[i
] = socket(AF_INET6
, SOCK_STREAM
, 0);
188 if (sk_fds
[i
] == -1) {
190 fprintf(stderr
, "socket(): errno:%d\n", errno
);
193 err
= bpf_map_update_elem(map_fd
, &sk_fds
[i
], &value
,
198 "bpf_map_update_elem(): errno:%d\n",
204 notify_thread_done();
205 wait_for_map_close();
208 for (i
= 0; i
< nr_sk_per_thread
; i
++) {
213 notify_thread_redo();
220 for (i
= 0; i
< nr_sk_per_thread
&& sk_fds
[i
] != -1; i
++)
227 static int do_sk_storage_map_stress_free(void)
229 int i
, map_fd
= -1, err
= 0, nr_threads_created
= 0;
230 pthread_t
*sk_thread_ids
;
233 sk_thread_ids
= malloc(sizeof(pthread_t
) * nr_sk_threads
);
234 if (!sk_thread_ids
) {
235 fprintf(stderr
, "malloc(sk_threads): NULL\n");
239 for (i
= 0; i
< nr_sk_threads
; i
++) {
240 err
= pthread_create(&sk_thread_ids
[i
], NULL
,
241 insert_close_thread
, NULL
);
246 nr_threads_created
++;
249 while (!is_stopped()) {
250 map_fd
= create_sk_storage_map();
251 WRITE_ONCE(sk_storage_map
, map_fd
);
253 if (!wait_for_threads_done())
256 WRITE_ONCE(sk_storage_map
, -1);
260 if (!wait_for_threads_redo())
266 for (i
= 0; i
< nr_threads_created
; i
++) {
267 pthread_join(sk_thread_ids
[i
], &thread_ret
);
268 if (IS_ERR(thread_ret
) && !err
) {
269 err
= PTR_ERR(thread_ret
);
270 fprintf(stderr
, "threads#%u: err:%d\n", i
, err
);
281 static void *update_thread(void *arg
)
286 } value
= { .cnt
= 0xeB9F, .lock
= 0, };
287 int map_fd
= READ_ONCE(sk_storage_map
);
288 int sk_fd
= *(int *)arg
;
289 int err
= 0; /* Suppress compiler false alarm */
291 while (!is_stopped()) {
292 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
, 0);
293 if (err
&& errno
!= EAGAIN
) {
295 fprintf(stderr
, "bpf_map_update_elem: %d %d\n",
309 static void *delete_thread(void *arg
)
311 int map_fd
= READ_ONCE(sk_storage_map
);
312 int sk_fd
= *(int *)arg
;
313 int err
= 0; /* Suppress compiler false alarm */
315 while (!is_stopped()) {
316 err
= bpf_map_delete_elem(map_fd
, &sk_fd
);
317 if (err
&& errno
!= ENOENT
) {
319 fprintf(stderr
, "bpf_map_delete_elem: %d %d\n",
333 static int do_sk_storage_map_stress_change(void)
335 int i
, sk_fd
, map_fd
= -1, err
= 0, nr_threads_created
= 0;
336 pthread_t
*sk_thread_ids
;
339 sk_thread_ids
= malloc(sizeof(pthread_t
) * nr_sk_threads
);
340 if (!sk_thread_ids
) {
341 fprintf(stderr
, "malloc(sk_threads): NULL\n");
345 sk_fd
= socket(AF_INET6
, SOCK_STREAM
, 0);
351 map_fd
= create_sk_storage_map();
352 WRITE_ONCE(sk_storage_map
, map_fd
);
354 for (i
= 0; i
< nr_sk_threads
; i
++) {
356 err
= pthread_create(&sk_thread_ids
[i
], NULL
,
357 update_thread
, &sk_fd
);
359 err
= pthread_create(&sk_thread_ids
[i
], NULL
,
360 delete_thread
, &sk_fd
);
365 nr_threads_created
++;
368 wait_for_threads_err();
372 for (i
= 0; i
< nr_threads_created
; i
++) {
373 pthread_join(sk_thread_ids
[i
], &thread_ret
);
374 if (IS_ERR(thread_ret
) && !err
) {
375 err
= PTR_ERR(thread_ret
);
376 fprintf(stderr
, "threads#%u: err:%d\n", i
, err
);
388 static void stop_handler(int signum
)
390 if (signum
!= SIGALRM
)
391 printf("stopping...\n");
395 #define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
396 #define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
397 #define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
398 #define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"
400 static void test_sk_storage_map_stress_free(void)
402 struct rlimit rlim_old
, rlim_new
= {};
405 getrlimit(RLIMIT_NOFILE
, &rlim_old
);
407 signal(SIGTERM
, stop_handler
);
408 signal(SIGINT
, stop_handler
);
410 signal(SIGALRM
, stop_handler
);
414 if (rlim_old
.rlim_cur
< nr_sk_threads
* nr_sk_per_thread
) {
415 rlim_new
.rlim_cur
= nr_sk_threads
* nr_sk_per_thread
+ 128;
416 rlim_new
.rlim_max
= rlim_new
.rlim_cur
+ 128;
417 err
= setrlimit(RLIMIT_NOFILE
, &rlim_new
);
418 CHECK(err
, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
419 rlim_new
.rlim_cur
, errno
);
422 err
= do_sk_storage_map_stress_free();
424 signal(SIGTERM
, SIG_DFL
);
425 signal(SIGINT
, SIG_DFL
);
427 signal(SIGALRM
, SIG_DFL
);
431 if (rlim_new
.rlim_cur
)
432 setrlimit(RLIMIT_NOFILE
, &rlim_old
);
434 CHECK(err
, "test_sk_storage_map_stress_free", "err:%d\n", err
);
437 static void test_sk_storage_map_stress_change(void)
441 signal(SIGTERM
, stop_handler
);
442 signal(SIGINT
, stop_handler
);
444 signal(SIGALRM
, stop_handler
);
448 err
= do_sk_storage_map_stress_change();
450 signal(SIGTERM
, SIG_DFL
);
451 signal(SIGINT
, SIG_DFL
);
453 signal(SIGALRM
, SIG_DFL
);
457 CHECK(err
, "test_sk_storage_map_stress_change", "err:%d\n", err
);
460 static void test_sk_storage_map_basic(void)
465 } value
= { .cnt
= 0xeB9f, .lock
= 0, }, lookup_value
;
466 struct bpf_create_map_attr bad_xattr
;
467 int btf_fd
, map_fd
, sk_fd
, err
;
470 CHECK(btf_fd
== -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
472 xattr
.btf_fd
= btf_fd
;
474 sk_fd
= socket(AF_INET6
, SOCK_STREAM
, 0);
475 CHECK(sk_fd
== -1, "socket()", "sk_fd:%d errno:%d\n",
478 map_fd
= bpf_create_map_xattr(&xattr
);
479 CHECK(map_fd
== -1, "bpf_create_map_xattr(good_xattr)",
480 "map_fd:%d errno:%d\n", map_fd
, errno
);
483 memcpy(&lookup_value
, &value
, sizeof(value
));
484 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
,
485 BPF_NOEXIST
| BPF_F_LOCK
);
486 CHECK(err
, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
487 "err:%d errno:%d\n", err
, errno
);
488 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
490 CHECK(err
|| lookup_value
.cnt
!= value
.cnt
,
491 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
492 "err:%d errno:%d cnt:%x(%x)\n",
493 err
, errno
, lookup_value
.cnt
, value
.cnt
);
495 /* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
497 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
,
498 BPF_EXIST
| BPF_F_LOCK
);
499 CHECK(err
, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
500 "err:%d errno:%d\n", err
, errno
);
501 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
503 CHECK(err
|| lookup_value
.cnt
!= value
.cnt
,
504 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
505 "err:%d errno:%d cnt:%x(%x)\n",
506 err
, errno
, lookup_value
.cnt
, value
.cnt
);
508 /* Bump the cnt and update with BPF_EXIST */
510 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
, BPF_EXIST
);
511 CHECK(err
, "bpf_map_update_elem(BPF_EXIST)",
512 "err:%d errno:%d\n", err
, errno
);
513 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
515 CHECK(err
|| lookup_value
.cnt
!= value
.cnt
,
516 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
517 "err:%d errno:%d cnt:%x(%x)\n",
518 err
, errno
, lookup_value
.cnt
, value
.cnt
);
520 /* Update with BPF_NOEXIST */
522 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
,
523 BPF_NOEXIST
| BPF_F_LOCK
);
524 CHECK(!err
|| errno
!= EEXIST
,
525 "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
526 "err:%d errno:%d\n", err
, errno
);
527 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
, BPF_NOEXIST
);
528 CHECK(!err
|| errno
!= EEXIST
, "bpf_map_update_elem(BPF_NOEXIST)",
529 "err:%d errno:%d\n", err
, errno
);
531 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
533 CHECK(err
|| lookup_value
.cnt
!= value
.cnt
,
534 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
535 "err:%d errno:%d cnt:%x(%x)\n",
536 err
, errno
, lookup_value
.cnt
, value
.cnt
);
538 /* Bump the cnt again and update with map_flags == 0 */
540 err
= bpf_map_update_elem(map_fd
, &sk_fd
, &value
, 0);
541 CHECK(err
, "bpf_map_update_elem()", "err:%d errno:%d\n",
543 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
545 CHECK(err
|| lookup_value
.cnt
!= value
.cnt
,
546 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
547 "err:%d errno:%d cnt:%x(%x)\n",
548 err
, errno
, lookup_value
.cnt
, value
.cnt
);
550 /* Test delete elem */
551 err
= bpf_map_delete_elem(map_fd
, &sk_fd
);
552 CHECK(err
, "bpf_map_delete_elem()", "err:%d errno:%d\n",
554 err
= bpf_map_lookup_elem_flags(map_fd
, &sk_fd
, &lookup_value
,
556 CHECK(!err
|| errno
!= ENOENT
,
557 "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
558 "err:%d errno:%d\n", err
, errno
);
559 err
= bpf_map_delete_elem(map_fd
, &sk_fd
);
560 CHECK(!err
|| errno
!= ENOENT
, "bpf_map_delete_elem()",
561 "err:%d errno:%d\n", err
, errno
);
563 memcpy(&bad_xattr
, &xattr
, sizeof(xattr
));
564 bad_xattr
.btf_key_type_id
= 0;
565 err
= bpf_create_map_xattr(&bad_xattr
);
566 CHECK(!err
|| errno
!= EINVAL
, "bap_create_map_xattr(bad_xattr)",
567 "err:%d errno:%d\n", err
, errno
);
569 memcpy(&bad_xattr
, &xattr
, sizeof(xattr
));
570 bad_xattr
.btf_key_type_id
= 3;
571 err
= bpf_create_map_xattr(&bad_xattr
);
572 CHECK(!err
|| errno
!= EINVAL
, "bap_create_map_xattr(bad_xattr)",
573 "err:%d errno:%d\n", err
, errno
);
575 memcpy(&bad_xattr
, &xattr
, sizeof(xattr
));
576 bad_xattr
.max_entries
= 1;
577 err
= bpf_create_map_xattr(&bad_xattr
);
578 CHECK(!err
|| errno
!= EINVAL
, "bap_create_map_xattr(bad_xattr)",
579 "err:%d errno:%d\n", err
, errno
);
581 memcpy(&bad_xattr
, &xattr
, sizeof(xattr
));
582 bad_xattr
.map_flags
= 0;
583 err
= bpf_create_map_xattr(&bad_xattr
);
584 CHECK(!err
|| errno
!= EINVAL
, "bap_create_map_xattr(bad_xattr)",
585 "err:%d errno:%d\n", err
, errno
);
593 void test_sk_storage_map(void)
595 const char *test_name
, *env_opt
;
596 bool test_ran
= false;
598 test_name
= getenv(BPF_SK_STORAGE_MAP_TEST_NAME
);
600 env_opt
= getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS
);
602 nr_sk_threads
= atoi(env_opt
);
604 env_opt
= getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD
);
606 nr_sk_per_thread
= atoi(env_opt
);
608 env_opt
= getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S
);
610 runtime_s
= atoi(env_opt
);
612 if (!test_name
|| !strcmp(test_name
, "basic")) {
613 test_sk_storage_map_basic();
616 if (!test_name
|| !strcmp(test_name
, "stress_free")) {
617 test_sk_storage_map_stress_free();
620 if (!test_name
|| !strcmp(test_name
, "stress_change")) {
621 test_sk_storage_map_stress_change();
626 printf("%s:PASS\n", __func__
);
628 CHECK(1, "Invalid test_name", "%s\n", test_name
);