/*
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <sys/resource.h>

/* The BPF map wrappers (bpf_map_create/update/lookup/delete/next_key) and
 * bpf_num_possible_cpus() are assumed to come from the selftests' local
 * helper headers.
 */
#include "bpf_sys.h"
#include "bpf_util.h"

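/* Free-node targets used by the tests: main() picks one of these as
 * tgt_free, depending on whether the map uses the common LRU list or
 * per-cpu LRU lists (BPF_F_NO_COMMON_LRU), and the sanity tests size
 * their maps and key batches relative to it.
 */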
#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(16)

static int nr_cpus;

static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}

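/* Return 1 iff every key in map1 is also present in map0 with the same
 * value[0] (only the first per-cpu slot is compared).  map_equal() below
 * checks this containment in both directions.
 */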
static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup(map1, &next_key, value1));
		ret = bpf_map_lookup(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}

	return 1;
}

static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}

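/* Pin task 'pid' (0 == the calling task) to the next online CPU, starting
 * the search at 'next_to_try'.  Returns -1 once every CPU has been tried,
 * otherwise the index to resume from on the next call (used by
 * test_lru_sanity5 to walk the CPUs).
 */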
static int sched_next_online(int pid, int next_to_try)
{
	cpu_set_t cpuset;

	if (next_to_try == nr_cpus)
		return -1;

	while (next_to_try < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next_to_try++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
			break;
	}

	return next_to_try;
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1 (sets the ref bit)
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate the map.  Only key=1 and key=3 are found.
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

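	/* With BPF_F_NO_COMMON_LRU each CPU has its own LRU list, so give
	 * every CPU the same two-element budget; otherwise a single
	 * two-element map is shared by all CPUs.
	 */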
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */
	key = 1;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
	       /* key=1 already exists */
	       errno == EEXIST);

	/* an invalid flags value must be rejected */
	assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
	       errno == EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => 1+tgt_free/2 to LOCAL_FREE_TARGET will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The percpu LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  Hence, it
		 * only frees old nodes when it runs out of free nodes,
		 * so this test does not apply to BPF_F_NO_COMMON_LRU.
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 * => 1+tgt_free/2 to LOCAL_FREE_TARGET will be
	 *    removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Key 1+tgt_free to tgt_free*3/2
 *      will be removed from LRU because it has never
 *      been looked up and its ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The percpu LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  Hence, it
		 * only frees old nodes when it runs out of free nodes,
		 * so this test does not apply to BPF_F_NO_COMMON_LRU.
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Any bpf_map_update will require to acquire a new node
	 * from LRU first.
	 *
	 * The local list is running out of free nodes.
	 * It gets from the global LRU list which tries to
	 * shrink the inactive list to get tgt_free
	 * number of free nodes.
	 *
	 * Hence, the oldest keys 1 to tgt_free/2
	 * are removed from the LRU list.
	 */

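	/* The per-cpu LRU hash variant triggers the shrink by inserting a
	 * brand-new node for key=1 and deleting it again.  For the common
	 * LRU hash, the BPF_EXIST update itself has to acquire a new node
	 * first; that acquisition shrinks the LRU and evicts the old key=1,
	 * so the update below is expected to fail.
	 */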
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_delete(lru_map_fd, &key));
	} else {
		assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		/* it was removed by the LRU shrink above */
		assert(bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2*tgt_free
 * It is to test the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

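/* Test deletion: fill the map with keys 1 to 2*tgt_free, look up the first
 * tgt_free of them, delete the rest (a second delete of the same key must
 * fail), then insert 2*tgt_free fresh keys and compare against the
 * expected map.
 */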
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, 0) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	key = 1 + (2 * tgt_free);
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete(lru_map_fd, &key));
		/* a second delete of the same key must fail */
		assert(bpf_map_delete(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

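/* Runs in a forked child pinned to one CPU: verify that the key inserted
 * by the previous CPU is still present, then insert last_key + 1, which
 * evicts last_key from this one-element map.
 */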
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by previous CPU can be found */
	assert(!bpf_map_lookup(map_fd, &last_key, value));

	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup(map_fd, &key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup(map_fd, &last_key, value));
}

/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_sched_cpu = 0;
	int map_fd;
	int i;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));

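	/* Walk the CPUs: each child pins itself to the next online CPU,
	 * checks that the key inserted on the previous CPU survived, and
	 * inserts key + 1 in its place; the parent advances key so the
	 * next child knows what to look for.
	 */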
	for (i = 0; i < nr_cpus; i++) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			next_sched_cpu = sched_next_online(0, next_sched_cpu);
			if (next_sched_cpu != -1)
				do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn #%d process\n", i);
			exit(1);
		} else {
			int status;

			/* This is mostly redundant; it just lets the parent
			 * process update next_sched_cpu for the next child
			 * process.
			 */
			next_sched_cpu = sched_next_online(pid, next_sched_cpu);

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
		}

		/* the child inserted key + 1 */
		key++;
	}

	close(map_fd);

	printf("Pass\n");
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

	assert(!setrlimit(RLIMIT_MEMLOCK, &r));

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);
		}
	}

	return 0;
}