/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
23 #include "bpf_rlimit.h"
25 #define LOCAL_FREE_TARGET (128)
26 #define PERCPU_FREE_TARGET (4)
/* Create a BPF map with unsigned long long keys and values.
 *
 * @map_type:  BPF map type (e.g. BPF_MAP_TYPE_LRU_HASH)
 * @map_flags: creation flags (e.g. BPF_F_NO_COMMON_LRU)
 * @size:      maximum number of entries
 *
 * Returns the map fd, or -1 on failure (error reported via perror).
 */
static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_create_map");

	return map_fd;
}
43 static int map_subset(int map0
, int map1
)
45 unsigned long long next_key
= 0;
46 unsigned long long value0
[nr_cpus
], value1
[nr_cpus
];
49 while (!bpf_map_get_next_key(map1
, &next_key
, &next_key
)) {
50 assert(!bpf_map_lookup_elem(map1
, &next_key
, value1
));
51 ret
= bpf_map_lookup_elem(map0
, &next_key
, value0
);
53 printf("key:%llu not found from map. %s(%d)\n",
54 next_key
, strerror(errno
), errno
);
57 if (value0
[0] != value1
[0]) {
58 printf("key:%llu value0:%llu != value1:%llu\n",
59 next_key
, value0
[0], value1
[0]);
/* Two maps hold the same contents iff each is a subset of the other. */
static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
71 static int sched_next_online(int pid
, int *next_to_try
)
74 int next
= *next_to_try
;
77 while (next
< nr_cpus
) {
79 CPU_SET(next
++, &cpuset
);
80 if (!sched_setaffinity(pid
, sizeof(cpuset
), &cpuset
)) {
90 /* Size of the LRU amp is 2
95 * => Key=2 will be removed by LRU
96 * Iterate map. Only found key=1 and key=3
98 static void test_lru_sanity0(int map_type
, int map_flags
)
100 unsigned long long key
, value
[nr_cpus
];
101 int lru_map_fd
, expected_map_fd
;
104 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
107 assert(sched_next_online(0, &next_cpu
) != -1);
109 if (map_flags
& BPF_F_NO_COMMON_LRU
)
110 lru_map_fd
= create_map(map_type
, map_flags
, 2 * nr_cpus
);
112 lru_map_fd
= create_map(map_type
, map_flags
, 2);
113 assert(lru_map_fd
!= -1);
115 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, 2);
116 assert(expected_map_fd
!= -1);
120 /* insert key=1 element */
123 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
124 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
127 /* BPF_NOEXIST means: add new element if it doesn't exist */
128 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
) == -1
129 /* key=1 already exists */
132 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, -1) == -1 &&
135 /* insert key=2 element */
137 /* check that key=2 is not found */
139 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1 &&
142 /* BPF_EXIST means: update existing element */
143 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_EXIST
) == -1 &&
144 /* key=2 is not there */
147 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
149 /* insert key=3 element */
151 /* check that key=3 is not found */
153 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1 &&
156 /* check that key=1 can be found and mark the ref bit to
157 * stop LRU from removing key=1
160 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
161 assert(value
[0] == 1234);
164 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
165 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
168 /* key=2 has been removed from the LRU */
170 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1);
172 assert(map_equal(lru_map_fd
, expected_map_fd
));
174 close(expected_map_fd
);
180 /* Size of the LRU map is 1.5*tgt_free
181 * Insert 1 to tgt_free (+tgt_free keys)
182 * Lookup 1 to tgt_free/2
183 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
184 * => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
186 static void test_lru_sanity1(int map_type
, int map_flags
, unsigned int tgt_free
)
188 unsigned long long key
, end_key
, value
[nr_cpus
];
189 int lru_map_fd
, expected_map_fd
;
190 unsigned int batch_size
;
191 unsigned int map_size
;
194 if (map_flags
& BPF_F_NO_COMMON_LRU
)
195 /* This test is only applicable to common LRU list */
198 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
201 assert(sched_next_online(0, &next_cpu
) != -1);
203 batch_size
= tgt_free
/ 2;
204 assert(batch_size
* 2 == tgt_free
);
206 map_size
= tgt_free
+ batch_size
;
207 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
208 assert(lru_map_fd
!= -1);
210 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
211 assert(expected_map_fd
!= -1);
215 /* Insert 1 to tgt_free (+tgt_free keys) */
216 end_key
= 1 + tgt_free
;
217 for (key
= 1; key
< end_key
; key
++)
218 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
221 /* Lookup 1 to tgt_free/2 */
222 end_key
= 1 + batch_size
;
223 for (key
= 1; key
< end_key
; key
++) {
224 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
225 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
229 /* Insert 1+tgt_free to 2*tgt_free
230 * => 1+tgt_free/2 to LOCALFREE_TARGET will be
234 end_key
= key
+ tgt_free
;
235 for (; key
< end_key
; key
++) {
236 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
238 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
242 assert(map_equal(lru_map_fd
, expected_map_fd
));
244 close(expected_map_fd
);
250 /* Size of the LRU map 1.5 * tgt_free
251 * Insert 1 to tgt_free (+tgt_free keys)
252 * Update 1 to tgt_free/2
253 * => The original 1 to tgt_free/2 will be removed due to
254 * the LRU shrink process
255 * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
256 * Insert 1+tgt_free to tgt_free*3/2
257 * Insert 1+tgt_free*3/2 to tgt_free*5/2
258 * => Key 1+tgt_free to tgt_free*3/2
259 * will be removed from LRU because it has never
260 * been lookup and ref bit is not set
262 static void test_lru_sanity2(int map_type
, int map_flags
, unsigned int tgt_free
)
264 unsigned long long key
, value
[nr_cpus
];
265 unsigned long long end_key
;
266 int lru_map_fd
, expected_map_fd
;
267 unsigned int batch_size
;
268 unsigned int map_size
;
271 if (map_flags
& BPF_F_NO_COMMON_LRU
)
272 /* This test is only applicable to common LRU list */
275 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
278 assert(sched_next_online(0, &next_cpu
) != -1);
280 batch_size
= tgt_free
/ 2;
281 assert(batch_size
* 2 == tgt_free
);
283 map_size
= tgt_free
+ batch_size
;
284 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
285 assert(lru_map_fd
!= -1);
287 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
288 assert(expected_map_fd
!= -1);
292 /* Insert 1 to tgt_free (+tgt_free keys) */
293 end_key
= 1 + tgt_free
;
294 for (key
= 1; key
< end_key
; key
++)
295 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
298 /* Any bpf_map_update_elem will require to acquire a new node
301 * The local list is running out of free nodes.
302 * It gets from the global LRU list which tries to
303 * shrink the inactive list to get tgt_free
304 * number of free nodes.
306 * Hence, the oldest key 1 to tgt_free/2
307 * are removed from the LRU list.
310 if (map_type
== BPF_MAP_TYPE_LRU_PERCPU_HASH
) {
311 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
313 assert(!bpf_map_delete_elem(lru_map_fd
, &key
));
315 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
,
319 /* Re-insert 1 to tgt_free/2 again and do a lookup
322 end_key
= 1 + batch_size
;
324 for (key
= 1; key
< end_key
; key
++) {
325 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
326 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
328 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
329 assert(value
[0] == 4321);
330 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
336 /* Insert 1+tgt_free to tgt_free*3/2 */
337 end_key
= 1 + tgt_free
+ batch_size
;
338 for (key
= 1 + tgt_free
; key
< end_key
; key
++)
339 /* These newly added but not referenced keys will be
340 * gone during the next LRU shrink.
342 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
345 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
346 end_key
= key
+ tgt_free
;
347 for (; key
< end_key
; key
++) {
348 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
350 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
354 assert(map_equal(lru_map_fd
, expected_map_fd
));
356 close(expected_map_fd
);
362 /* Size of the LRU map is 2*tgt_free
363 * It is to test the active/inactive list rotation
364 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
365 * Lookup key 1 to tgt_free*3/2
366 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
367 * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
369 static void test_lru_sanity3(int map_type
, int map_flags
, unsigned int tgt_free
)
371 unsigned long long key
, end_key
, value
[nr_cpus
];
372 int lru_map_fd
, expected_map_fd
;
373 unsigned int batch_size
;
374 unsigned int map_size
;
377 if (map_flags
& BPF_F_NO_COMMON_LRU
)
378 /* This test is only applicable to common LRU list */
381 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
384 assert(sched_next_online(0, &next_cpu
) != -1);
386 batch_size
= tgt_free
/ 2;
387 assert(batch_size
* 2 == tgt_free
);
389 map_size
= tgt_free
* 2;
390 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
391 assert(lru_map_fd
!= -1);
393 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
394 assert(expected_map_fd
!= -1);
398 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
399 end_key
= 1 + (2 * tgt_free
);
400 for (key
= 1; key
< end_key
; key
++)
401 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
404 /* Lookup key 1 to tgt_free*3/2 */
405 end_key
= tgt_free
+ batch_size
;
406 for (key
= 1; key
< end_key
; key
++) {
407 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
408 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
412 /* Add 1+2*tgt_free to tgt_free*5/2
415 key
= 2 * tgt_free
+ 1;
416 end_key
= key
+ batch_size
;
417 for (; key
< end_key
; key
++) {
418 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
420 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
424 assert(map_equal(lru_map_fd
, expected_map_fd
));
426 close(expected_map_fd
);
433 static void test_lru_sanity4(int map_type
, int map_flags
, unsigned int tgt_free
)
435 int lru_map_fd
, expected_map_fd
;
436 unsigned long long key
, value
[nr_cpus
];
437 unsigned long long end_key
;
440 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
443 assert(sched_next_online(0, &next_cpu
) != -1);
445 if (map_flags
& BPF_F_NO_COMMON_LRU
)
446 lru_map_fd
= create_map(map_type
, map_flags
,
447 3 * tgt_free
* nr_cpus
);
449 lru_map_fd
= create_map(map_type
, map_flags
, 3 * tgt_free
);
450 assert(lru_map_fd
!= -1);
452 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0,
454 assert(expected_map_fd
!= -1);
458 for (key
= 1; key
<= 2 * tgt_free
; key
++)
459 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
463 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
465 for (key
= 1; key
<= tgt_free
; key
++) {
466 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
467 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
471 for (; key
<= 2 * tgt_free
; key
++) {
472 assert(!bpf_map_delete_elem(lru_map_fd
, &key
));
473 assert(bpf_map_delete_elem(lru_map_fd
, &key
));
476 end_key
= key
+ 2 * tgt_free
;
477 for (; key
< end_key
; key
++) {
478 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
480 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
484 assert(map_equal(lru_map_fd
, expected_map_fd
));
486 close(expected_map_fd
);
492 static void do_test_lru_sanity5(unsigned long long last_key
, int map_fd
)
494 unsigned long long key
, value
[nr_cpus
];
496 /* Ensure the last key inserted by previous CPU can be found */
497 assert(!bpf_map_lookup_elem(map_fd
, &last_key
, value
));
502 assert(!bpf_map_update_elem(map_fd
, &key
, value
, BPF_NOEXIST
));
503 assert(!bpf_map_lookup_elem(map_fd
, &key
, value
));
505 /* Cannot find the last key because it was removed by LRU */
506 assert(bpf_map_lookup_elem(map_fd
, &last_key
, value
));
509 /* Test map with only one element */
510 static void test_lru_sanity5(int map_type
, int map_flags
)
512 unsigned long long key
, value
[nr_cpus
];
516 if (map_flags
& BPF_F_NO_COMMON_LRU
)
519 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
522 map_fd
= create_map(map_type
, map_flags
, 1);
523 assert(map_fd
!= -1);
527 assert(!bpf_map_update_elem(map_fd
, &key
, value
, BPF_NOEXIST
));
529 while (sched_next_online(0, &next_cpu
) != -1) {
534 do_test_lru_sanity5(key
, map_fd
);
536 } else if (pid
== -1) {
537 printf("couldn't spawn process to test key:%llu\n",
543 assert(waitpid(pid
, &status
, 0) == pid
);
550 /* At least one key should be tested */
556 /* Test list rotation for BPF_F_NO_COMMON_LRU map */
557 static void test_lru_sanity6(int map_type
, int map_flags
, int tgt_free
)
559 int lru_map_fd
, expected_map_fd
;
560 unsigned long long key
, value
[nr_cpus
];
561 unsigned int map_size
= tgt_free
* 2;
564 if (!(map_flags
& BPF_F_NO_COMMON_LRU
))
567 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
570 assert(sched_next_online(0, &next_cpu
) != -1);
572 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
573 assert(expected_map_fd
!= -1);
575 lru_map_fd
= create_map(map_type
, map_flags
, map_size
* nr_cpus
);
576 assert(lru_map_fd
!= -1);
580 for (key
= 1; key
<= tgt_free
; key
++) {
581 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
583 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
587 for (; key
<= tgt_free
* 2; key
++) {
588 unsigned long long stable_key
;
590 /* Make ref bit sticky for key: [1, tgt_free] */
591 for (stable_key
= 1; stable_key
<= tgt_free
; stable_key
++) {
592 /* Mark the ref bit */
593 assert(!bpf_map_lookup_elem(lru_map_fd
, &stable_key
,
596 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
600 for (; key
<= tgt_free
* 3; key
++) {
601 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
603 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
607 assert(map_equal(lru_map_fd
, expected_map_fd
));
609 close(expected_map_fd
);
615 int main(int argc
, char **argv
)
617 int map_types
[] = {BPF_MAP_TYPE_LRU_HASH
,
618 BPF_MAP_TYPE_LRU_PERCPU_HASH
};
619 int map_flags
[] = {0, BPF_F_NO_COMMON_LRU
};
622 setbuf(stdout
, NULL
);
624 nr_cpus
= bpf_num_possible_cpus();
625 assert(nr_cpus
!= -1);
626 printf("nr_cpus:%d\n\n", nr_cpus
);
628 for (f
= 0; f
< sizeof(map_flags
) / sizeof(*map_flags
); f
++) {
629 unsigned int tgt_free
= (map_flags
[f
] & BPF_F_NO_COMMON_LRU
) ?
630 PERCPU_FREE_TARGET
: LOCAL_FREE_TARGET
;
632 for (t
= 0; t
< sizeof(map_types
) / sizeof(*map_types
); t
++) {
633 test_lru_sanity0(map_types
[t
], map_flags
[f
]);
634 test_lru_sanity1(map_types
[t
], map_flags
[f
], tgt_free
);
635 test_lru_sanity2(map_types
[t
], map_flags
[f
], tgt_free
);
636 test_lru_sanity3(map_types
[t
], map_flags
[f
], tgt_free
);
637 test_lru_sanity4(map_types
[t
], map_flags
[f
], tgt_free
);
638 test_lru_sanity5(map_types
[t
], map_flags
[f
]);
639 test_lru_sanity6(map_types
[t
], map_flags
[f
], tgt_free
);