/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
19 #include <sys/resource.h>
24 #define LOCAL_FREE_TARGET (128)
25 #define PERCPU_FREE_TARGET (4)
/* Create a BPF map of the given type/flags with u64 keys and u64 values.
 * Returns the map fd, or -1 (with perror output) on failure.
 */
static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_create_map");

	return map_fd;
}
42 static int map_subset(int map0
, int map1
)
44 unsigned long long next_key
= 0;
45 unsigned long long value0
[nr_cpus
], value1
[nr_cpus
];
48 while (!bpf_map_get_next_key(map1
, &next_key
, &next_key
)) {
49 assert(!bpf_map_lookup_elem(map1
, &next_key
, value1
));
50 ret
= bpf_map_lookup_elem(map0
, &next_key
, value0
);
52 printf("key:%llu not found from map. %s(%d)\n",
53 next_key
, strerror(errno
), errno
);
56 if (value0
[0] != value1
[0]) {
57 printf("key:%llu value0:%llu != value1:%llu\n",
58 next_key
, value0
[0], value1
[0]);
/* Two maps are equal iff each one is a subset of the other. */
static int map_equal(int lru_map, int expected)
{
	if (!map_subset(lru_map, expected))
		return 0;
	return map_subset(expected, lru_map);
}
70 static int sched_next_online(int pid
, int *next_to_try
)
73 int next
= *next_to_try
;
76 while (next
< nr_cpus
) {
78 CPU_SET(next
++, &cpuset
);
79 if (!sched_setaffinity(pid
, sizeof(cpuset
), &cpuset
)) {
/* Size of the LRU map is 2
 * => Key=2 will be removed by LRU
 * Iterate map. Only found key=1 and key=3
 */
97 static void test_lru_sanity0(int map_type
, int map_flags
)
99 unsigned long long key
, value
[nr_cpus
];
100 int lru_map_fd
, expected_map_fd
;
103 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
106 assert(sched_next_online(0, &next_cpu
) != -1);
108 if (map_flags
& BPF_F_NO_COMMON_LRU
)
109 lru_map_fd
= create_map(map_type
, map_flags
, 2 * nr_cpus
);
111 lru_map_fd
= create_map(map_type
, map_flags
, 2);
112 assert(lru_map_fd
!= -1);
114 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, 2);
115 assert(expected_map_fd
!= -1);
119 /* insert key=1 element */
122 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
123 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
126 /* BPF_NOEXIST means: add new element if it doesn't exist */
127 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
) == -1
128 /* key=1 already exists */
131 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, -1) == -1 &&
134 /* insert key=2 element */
136 /* check that key=2 is not found */
138 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1 &&
141 /* BPF_EXIST means: update existing element */
142 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_EXIST
) == -1 &&
143 /* key=2 is not there */
146 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
148 /* insert key=3 element */
150 /* check that key=3 is not found */
152 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1 &&
155 /* check that key=1 can be found and mark the ref bit to
156 * stop LRU from removing key=1
159 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
160 assert(value
[0] == 1234);
163 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
164 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
167 /* key=2 has been removed from the LRU */
169 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
) == -1);
171 assert(map_equal(lru_map_fd
, expected_map_fd
));
173 close(expected_map_fd
);
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 * => 1+tgt_free/2 to LOCAL_FREE_TARGET will be removed by LRU
 */
185 static void test_lru_sanity1(int map_type
, int map_flags
, unsigned int tgt_free
)
187 unsigned long long key
, end_key
, value
[nr_cpus
];
188 int lru_map_fd
, expected_map_fd
;
189 unsigned int batch_size
;
190 unsigned int map_size
;
193 if (map_flags
& BPF_F_NO_COMMON_LRU
)
194 /* This test is only applicable to common LRU list */
197 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
200 assert(sched_next_online(0, &next_cpu
) != -1);
202 batch_size
= tgt_free
/ 2;
203 assert(batch_size
* 2 == tgt_free
);
205 map_size
= tgt_free
+ batch_size
;
206 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
207 assert(lru_map_fd
!= -1);
209 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
210 assert(expected_map_fd
!= -1);
214 /* Insert 1 to tgt_free (+tgt_free keys) */
215 end_key
= 1 + tgt_free
;
216 for (key
= 1; key
< end_key
; key
++)
217 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
220 /* Lookup 1 to tgt_free/2 */
221 end_key
= 1 + batch_size
;
222 for (key
= 1; key
< end_key
; key
++) {
223 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
224 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
228 /* Insert 1+tgt_free to 2*tgt_free
229 * => 1+tgt_free/2 to LOCALFREE_TARGET will be
233 end_key
= key
+ tgt_free
;
234 for (; key
< end_key
; key
++) {
235 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
237 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
241 assert(map_equal(lru_map_fd
, expected_map_fd
));
243 close(expected_map_fd
);
/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Key 1+tgt_free to tgt_free*3/2
 *      will be removed from LRU because it has never
 *      been lookup and ref bit is not set
 */
261 static void test_lru_sanity2(int map_type
, int map_flags
, unsigned int tgt_free
)
263 unsigned long long key
, value
[nr_cpus
];
264 unsigned long long end_key
;
265 int lru_map_fd
, expected_map_fd
;
266 unsigned int batch_size
;
267 unsigned int map_size
;
270 if (map_flags
& BPF_F_NO_COMMON_LRU
)
271 /* This test is only applicable to common LRU list */
274 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
277 assert(sched_next_online(0, &next_cpu
) != -1);
279 batch_size
= tgt_free
/ 2;
280 assert(batch_size
* 2 == tgt_free
);
282 map_size
= tgt_free
+ batch_size
;
283 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
284 assert(lru_map_fd
!= -1);
286 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
287 assert(expected_map_fd
!= -1);
291 /* Insert 1 to tgt_free (+tgt_free keys) */
292 end_key
= 1 + tgt_free
;
293 for (key
= 1; key
< end_key
; key
++)
294 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
297 /* Any bpf_map_update_elem will require to acquire a new node
300 * The local list is running out of free nodes.
301 * It gets from the global LRU list which tries to
302 * shrink the inactive list to get tgt_free
303 * number of free nodes.
305 * Hence, the oldest key 1 to tgt_free/2
306 * are removed from the LRU list.
309 if (map_type
== BPF_MAP_TYPE_LRU_PERCPU_HASH
) {
310 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
312 assert(!bpf_map_delete_elem(lru_map_fd
, &key
));
314 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
,
318 /* Re-insert 1 to tgt_free/2 again and do a lookup
321 end_key
= 1 + batch_size
;
323 for (key
= 1; key
< end_key
; key
++) {
324 assert(bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
325 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
327 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
328 assert(value
[0] == 4321);
329 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
335 /* Insert 1+tgt_free to tgt_free*3/2 */
336 end_key
= 1 + tgt_free
+ batch_size
;
337 for (key
= 1 + tgt_free
; key
< end_key
; key
++)
338 /* These newly added but not referenced keys will be
339 * gone during the next LRU shrink.
341 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
344 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
345 end_key
= key
+ tgt_free
;
346 for (; key
< end_key
; key
++) {
347 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
349 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
353 assert(map_equal(lru_map_fd
, expected_map_fd
));
355 close(expected_map_fd
);
361 /* Size of the LRU map is 2*tgt_free
362 * It is to test the active/inactive list rotation
363 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
364 * Lookup key 1 to tgt_free*3/2
365 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
366 * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
368 static void test_lru_sanity3(int map_type
, int map_flags
, unsigned int tgt_free
)
370 unsigned long long key
, end_key
, value
[nr_cpus
];
371 int lru_map_fd
, expected_map_fd
;
372 unsigned int batch_size
;
373 unsigned int map_size
;
376 if (map_flags
& BPF_F_NO_COMMON_LRU
)
377 /* This test is only applicable to common LRU list */
380 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
383 assert(sched_next_online(0, &next_cpu
) != -1);
385 batch_size
= tgt_free
/ 2;
386 assert(batch_size
* 2 == tgt_free
);
388 map_size
= tgt_free
* 2;
389 lru_map_fd
= create_map(map_type
, map_flags
, map_size
);
390 assert(lru_map_fd
!= -1);
392 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
393 assert(expected_map_fd
!= -1);
397 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
398 end_key
= 1 + (2 * tgt_free
);
399 for (key
= 1; key
< end_key
; key
++)
400 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
403 /* Lookup key 1 to tgt_free*3/2 */
404 end_key
= tgt_free
+ batch_size
;
405 for (key
= 1; key
< end_key
; key
++) {
406 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
407 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
411 /* Add 1+2*tgt_free to tgt_free*5/2
414 key
= 2 * tgt_free
+ 1;
415 end_key
= key
+ batch_size
;
416 for (; key
< end_key
; key
++) {
417 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
419 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
423 assert(map_equal(lru_map_fd
, expected_map_fd
));
425 close(expected_map_fd
);
432 static void test_lru_sanity4(int map_type
, int map_flags
, unsigned int tgt_free
)
434 int lru_map_fd
, expected_map_fd
;
435 unsigned long long key
, value
[nr_cpus
];
436 unsigned long long end_key
;
439 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
442 assert(sched_next_online(0, &next_cpu
) != -1);
444 if (map_flags
& BPF_F_NO_COMMON_LRU
)
445 lru_map_fd
= create_map(map_type
, map_flags
,
446 3 * tgt_free
* nr_cpus
);
448 lru_map_fd
= create_map(map_type
, map_flags
, 3 * tgt_free
);
449 assert(lru_map_fd
!= -1);
451 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0,
453 assert(expected_map_fd
!= -1);
457 for (key
= 1; key
<= 2 * tgt_free
; key
++)
458 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
462 assert(bpf_map_update_elem(lru_map_fd
, &key
, value
, BPF_NOEXIST
));
464 for (key
= 1; key
<= tgt_free
; key
++) {
465 assert(!bpf_map_lookup_elem(lru_map_fd
, &key
, value
));
466 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
470 for (; key
<= 2 * tgt_free
; key
++) {
471 assert(!bpf_map_delete_elem(lru_map_fd
, &key
));
472 assert(bpf_map_delete_elem(lru_map_fd
, &key
));
475 end_key
= key
+ 2 * tgt_free
;
476 for (; key
< end_key
; key
++) {
477 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
479 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
483 assert(map_equal(lru_map_fd
, expected_map_fd
));
485 close(expected_map_fd
);
491 static void do_test_lru_sanity5(unsigned long long last_key
, int map_fd
)
493 unsigned long long key
, value
[nr_cpus
];
495 /* Ensure the last key inserted by previous CPU can be found */
496 assert(!bpf_map_lookup_elem(map_fd
, &last_key
, value
));
501 assert(!bpf_map_update_elem(map_fd
, &key
, value
, BPF_NOEXIST
));
502 assert(!bpf_map_lookup_elem(map_fd
, &key
, value
));
504 /* Cannot find the last key because it was removed by LRU */
505 assert(bpf_map_lookup_elem(map_fd
, &last_key
, value
));
508 /* Test map with only one element */
509 static void test_lru_sanity5(int map_type
, int map_flags
)
511 unsigned long long key
, value
[nr_cpus
];
515 if (map_flags
& BPF_F_NO_COMMON_LRU
)
518 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
521 map_fd
= create_map(map_type
, map_flags
, 1);
522 assert(map_fd
!= -1);
526 assert(!bpf_map_update_elem(map_fd
, &key
, value
, BPF_NOEXIST
));
528 while (sched_next_online(0, &next_cpu
) != -1) {
533 do_test_lru_sanity5(key
, map_fd
);
535 } else if (pid
== -1) {
536 printf("couldn't spawn process to test key:%llu\n",
542 assert(waitpid(pid
, &status
, 0) == pid
);
549 /* At least one key should be tested */
555 /* Test list rotation for BPF_F_NO_COMMON_LRU map */
556 static void test_lru_sanity6(int map_type
, int map_flags
, int tgt_free
)
558 int lru_map_fd
, expected_map_fd
;
559 unsigned long long key
, value
[nr_cpus
];
560 unsigned int map_size
= tgt_free
* 2;
563 if (!(map_flags
& BPF_F_NO_COMMON_LRU
))
566 printf("%s (map_type:%d map_flags:0x%X): ", __func__
, map_type
,
569 assert(sched_next_online(0, &next_cpu
) != -1);
571 expected_map_fd
= create_map(BPF_MAP_TYPE_HASH
, 0, map_size
);
572 assert(expected_map_fd
!= -1);
574 lru_map_fd
= create_map(map_type
, map_flags
, map_size
* nr_cpus
);
575 assert(lru_map_fd
!= -1);
579 for (key
= 1; key
<= tgt_free
; key
++) {
580 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
582 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
586 for (; key
<= tgt_free
* 2; key
++) {
587 unsigned long long stable_key
;
589 /* Make ref bit sticky for key: [1, tgt_free] */
590 for (stable_key
= 1; stable_key
<= tgt_free
; stable_key
++) {
591 /* Mark the ref bit */
592 assert(!bpf_map_lookup_elem(lru_map_fd
, &stable_key
,
595 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
599 for (; key
<= tgt_free
* 3; key
++) {
600 assert(!bpf_map_update_elem(lru_map_fd
, &key
, value
,
602 assert(!bpf_map_update_elem(expected_map_fd
, &key
, value
,
606 assert(map_equal(lru_map_fd
, expected_map_fd
));
608 close(expected_map_fd
);
614 int main(int argc
, char **argv
)
616 struct rlimit r
= {RLIM_INFINITY
, RLIM_INFINITY
};
617 int map_types
[] = {BPF_MAP_TYPE_LRU_HASH
,
618 BPF_MAP_TYPE_LRU_PERCPU_HASH
};
619 int map_flags
[] = {0, BPF_F_NO_COMMON_LRU
};
622 setbuf(stdout
, NULL
);
624 assert(!setrlimit(RLIMIT_MEMLOCK
, &r
));
626 nr_cpus
= bpf_num_possible_cpus();
627 assert(nr_cpus
!= -1);
628 printf("nr_cpus:%d\n\n", nr_cpus
);
630 for (f
= 0; f
< sizeof(map_flags
) / sizeof(*map_flags
); f
++) {
631 unsigned int tgt_free
= (map_flags
[f
] & BPF_F_NO_COMMON_LRU
) ?
632 PERCPU_FREE_TARGET
: LOCAL_FREE_TARGET
;
634 for (t
= 0; t
< sizeof(map_types
) / sizeof(*map_types
); t
++) {
635 test_lru_sanity0(map_types
[t
], map_flags
[f
]);
636 test_lru_sanity1(map_types
[t
], map_flags
[f
], tgt_free
);
637 test_lru_sanity2(map_types
[t
], map_flags
[f
], tgt_free
);
638 test_lru_sanity3(map_types
[t
], map_flags
[f
], tgt_free
);
639 test_lru_sanity4(map_types
[t
], map_flags
[f
], tgt_free
);
640 test_lru_sanity5(map_types
[t
], map_flags
[f
]);
641 test_lru_sanity6(map_types
[t
], map_flags
[f
], tgt_free
);