// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Facebook
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>
#include <sys/wait.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "bpf_util.h"
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"

#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(4)
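/* Free-list refill targets that the tests assume the kernel's LRU map uses:
 * LOCAL_FREE_TARGET for the common LRU list and PERCPU_FREE_TARGET when the
 * map is created with BPF_F_NO_COMMON_LRU.  The tests size their maps and
 * key ranges around these values so the eviction points are predictable.
 */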
static int nr_cpus;

static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_create_map");

	return map_fd;
}
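/* Look up a key from BPF program (datapath) context instead of through the
 * bpf() syscall.  Only datapath lookups set the LRU ref bit, so the tests
 * use this helper whenever a key should be protected from the next LRU
 * shrink.  The test program stores the looked-up value into a temporary
 * 1-element array map, which is then read back from user space.
 */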
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
					    void *value)
{
	struct bpf_load_program_attr prog;
	struct bpf_create_map_attr map;
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, fd),
		BPF_LD_IMM64(BPF_REG_3, key),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
	__u8 data[64] = {};
	int mfd, pfd, ret, zero = 0;
	__u32 retval = 0;

	/* A 1-element array map that the program writes the looked-up
	 * value into.
	 */
	memset(&map, 0, sizeof(map));
	map.map_type = BPF_MAP_TYPE_ARRAY;
	map.key_size = sizeof(int);
	map.value_size = sizeof(unsigned long long);
	map.max_entries = 1;

	mfd = bpf_create_map_xattr(&map);
	if (mfd < 0)
		return -1;

	/* Patch the array map fd into the BPF_LD_MAP_VALUE instruction */
	insns[0].imm = mfd;

	memset(&prog, 0, sizeof(prog));
	prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
	prog.insns = insns;
	prog.insns_cnt = ARRAY_SIZE(insns);
	prog.license = "GPL";

	pfd = bpf_load_program_xattr(&prog, NULL, 0);
	if (pfd < 0) {
		close(mfd);
		return -1;
	}

	ret = bpf_prog_test_run(pfd, 1, data, sizeof(data),
				NULL, NULL, &retval, NULL);
	if (ret < 0 || retval != 42) {
		ret = -1;
	} else {
		assert(!bpf_map_lookup_elem(mfd, &zero, value));
		ret = 0;
	}

	close(pfd);
	close(mfd);
	return ret;
}
static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup_elem(map1, &next_key, value1));
		ret = bpf_map_lookup_elem(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}
static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
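/* Pin the given process (0 == the caller) to the next CPU it can run on.
 * Which CPU performs the map operations matters for BPF_F_NO_COMMON_LRU
 * maps, where every CPU maintains its own LRU list.
 */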
static int sched_next_online(int pid, int *next_to_try)
{
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = next;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only found key=1 and key=3
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
	       /* key=1 already exists */
	       && errno == EEXIST);

	assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
	       errno == EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => keys 1+tgt_free/2 to tgt_free will be removed by LRU
 */
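/* Concretely, with tgt_free = LOCAL_FREE_TARGET = 128 the map holds 192
 * entries: keys 1..128 are inserted, keys 1..64 are looked up (ref bit set)
 * and copied to the expected map, then keys 129..256 are inserted into both
 * maps.  Only the unreferenced keys 65..128 should be evicted, so the
 * surviving 64 + 128 = 192 keys exactly fill the map.
 */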
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 * => keys 1+tgt_free/2 to tgt_free will be
	 * removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Keys 1+tgt_free to tgt_free*3/2 will be removed from the LRU
 *      because they have never been looked up and their ref bit is
 *      not set
 */
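/* Concretely, with tgt_free = 128: map_size = 192.  After the steps above
 * the expected survivors are keys 1..64 (re-inserted and looked up, so
 * their ref bit is set) plus keys 193..320 from the last batch, i.e.
 * 64 + 128 = 192 keys, which again exactly fills the map.
 */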
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Any bpf_map_update_elem will require to acquire a new node
	 * from LRU first.
	 *
	 * The local list is running out of free nodes.
	 * It gets from the global LRU list which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest keys 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
	} else {
		assert(bpf_map_update_elem(lru_map_fd, &key, value,
					   BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
		       errno == ENOENT);
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2*tgt_free
 * It is to test the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => keys 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
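/* The keys looked up above get their ref bit set and are expected to be
 * rotated onto the active list, so when the extra tgt_free/2 inserts force
 * a shrink, only unreferenced tail keys are reclaimed.
 */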
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* This test is only applicable to common LRU list */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
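/* Test deletion: fill the map, reference the first tgt_free keys, delete the
 * second tgt_free keys (a second delete of the same key must fail), then
 * insert 2*tgt_free fresh keys and check the final contents.
 */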
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));

	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup 1 to tgt_free */
	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete_elem(lru_map_fd, &key));
		assert(bpf_map_delete_elem(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by previous CPU can be found */
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 &&
	       errno == ENOENT);
}
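/* test_lru_sanity5() below repeats the one-element-map check once per online
 * CPU: the parent pins itself to each CPU in turn and forks a child, and the
 * child verifies that the key inserted from the previous CPU is still
 * visible (the LRU list is shared when BPF_F_NO_COMMON_LRU is not set)
 * before inserting the next key, which evicts it.
 */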
/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}
/* Test list rotation for BPF_F_NO_COMMON_LRU map */
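/* Every CPU owns a private LRU list here (BPF_F_NO_COMMON_LRU), so the whole
 * test runs pinned to one CPU and relies on that CPU's list rotating the
 * repeatedly-referenced keys [1, tgt_free] onto its active list while the
 * unreferenced middle batch gets evicted.
 */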
static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned int map_size = tgt_free * 2;
	int next_cpu = 0;

	if (!(map_flags & BPF_F_NO_COMMON_LRU))
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus);
	assert(lru_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 2; key++) {
		unsigned long long stable_key;

		/* Make ref bit sticky for key: [1, tgt_free] */
		for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
			/* Mark the ref bit */
			assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
								 stable_key,
								 value));
		}
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	for (; key <= tgt_free * 3; key++) {
		assert(!bpf_map_update_elem(lru_map_fd, &key, value,
					    BPF_NOEXIST));
		assert(!bpf_map_update_elem(expected_map_fd, &key, value,
					    BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1 (datapath)
 * Lookup Key=2 (syscall)
 * Add Key=3
 *   => Key=2 will be removed by LRU
 * Iterate map.  Only found key=1 and key=3
 */
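/* A lookup done through the bpf() syscall does not set the LRU ref bit;
 * only the datapath lookup (the test program run by
 * bpf_map_lookup_elem_with_ref_bit()) does.  That is why key=2, although
 * looked up after key=1, is still the one evicted when key=3 is added.
 */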
static void test_lru_sanity7(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
	       /* key=1 already exists */
	       && errno == EEXIST);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 2;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup Key=1 (syscall)
 * Lookup Key=2 (datapath)
 * Add Key=3
 *   => Key=1 will be removed by LRU
 * Iterate map.  Only found key=2 and key=3
 */
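/* Mirror image of test_lru_sanity7: key=2 gets the ref bit via the datapath
 * lookup while key=1 is only looked up through the syscall, so key=1 is the
 * eviction victim when key=3 is added.
 */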
static void test_lru_sanity8(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
	       /* key=1 already exists */
	       && errno == EEXIST);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and do _not_ mark ref bit.
	 * this will be evicted on next update.
	 */
	key = 1;
	assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	/* check that key=2 can be found and mark the ref bit to
	 * stop LRU from removing key=2
	 */
	key = 2;
	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update_elem(expected_map_fd, &key, value,
				    BPF_NOEXIST));

	/* key=1 has been removed from the LRU */
	key = 1;
	assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}
int main(int argc, char **argv)
{
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);
			test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity7(map_types[t], map_flags[f]);
			test_lru_sanity8(map_types[t], map_flags[f]);
		}
	}

	return 0;
}