// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This contains some basic static unit tests for the allowedips data structure.
 * It also has two additional modes that are disabled and meant to be used by
 * folks directly playing with this file. If you define the macro
 * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
 * memory, it will be printed out as KERN_DEBUG in a format that can be passed
 * to graphviz (the dot command) to visualize it. If you define the macro
 * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
 * randomized tests done against a trivial implementation, which may take
 * upwards of a half-hour to complete. There's no set of users who should be
 * enabling these, and the only developers who should go anywhere near these
 * knobs are the ones who are reading this comment.
 */
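
/*
 * One workable (if crude) recipe for rendering the graphviz output from
 * userspace, assuming the "digraph trie { ... }" block has been copied out
 * of dmesg into a file named trie.dot:
 *
 *	$ dot -Tsvg trie.dot > trie.svg
 */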

#ifdef DEBUG

#include <linux/siphash.h>

static __init void print_node(struct allowedips_node *node, u8 bits)
{
	char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
	char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
	u8 ip1[16], ip2[16], cidr1, cidr2;
	char *style = "dotted";
	u32 color = 0;

	if (node == NULL)
		return;
	if (bits == 32) {
		fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
		fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
	} else if (bits == 128) {
		fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
		fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
	}
	if (node->peer) {
		hsiphash_key_t key = { { 0 } };

		memcpy(&key, &node->peer, sizeof(node->peer));
		color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
			hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
			hsiphash_1u32(0xabad1dea, &key) % 200;
		style = "bold";
	}
	wg_allowedips_read_node(node, ip1, &cidr1);
	printk(fmt_declaration, ip1, cidr1, style, color);
	if (node->bit[0]) {
		wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]),
					ip2, &cidr2);
		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
	}
	if (node->bit[1]) {
		wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]),
					ip2, &cidr2);
		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
	}
	if (node->bit[0])
		print_node(rcu_dereference_raw(node->bit[0]), bits);
	if (node->bit[1])
		print_node(rcu_dereference_raw(node->bit[1]), bits);
}

static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
{
	printk(KERN_DEBUG "digraph trie {\n");
	print_node(rcu_dereference_raw(top), bits);
	printk(KERN_DEBUG "}\n");
}

enum {
	NUM_PEERS = 2000,
	NUM_RAND_ROUTES = 400,
	NUM_MUTATED_ROUTES = 100,
	NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};

struct horrible_allowedips {
	struct hlist_head head;
};

struct horrible_allowedips_node {
	struct hlist_node table;
	union nf_inet_addr ip;
	union nf_inet_addr mask;
	u8 ip_version;
	void *value;
};

static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
	INIT_HLIST_HEAD(&table->head);
}

static __init void horrible_allowedips_free(struct horrible_allowedips *table)
{
	struct horrible_allowedips_node *node;
	struct hlist_node *h;

	hlist_for_each_entry_safe(node, h, &table->head, table) {
		hlist_del(&node->table);
		kfree(node);
	}
}

static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
	union nf_inet_addr mask;

	memset(&mask, 0, sizeof(mask));
	memset(&mask.all, 0xff, cidr / 8);
	if (cidr % 32)
		mask.all[cidr / 32] = (__force u32)htonl(
			(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
	return mask;
}
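
/*
 * For example, horrible_cidr_to_mask(20) first memsets the two whole bytes
 * to 0xff and then writes htonl(0xfffff000) over mask.all[0], yielding the
 * usual 255.255.240.0 for a /20.
 */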

static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
{
	return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
	       hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
}
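
/*
 * Counting set bits suffices here because the masks built by
 * horrible_cidr_to_mask() are always a contiguous run of ones from the top:
 * e.g. 255.255.240.0 has exactly 20 bits set, so its popcount is its cidr.
 */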

static __init inline void
horrible_mask_self(struct horrible_allowedips_node *node)
{
	if (node->ip_version == 4) {
		node->ip.ip &= node->mask.ip;
	} else if (node->ip_version == 6) {
		node->ip.ip6[0] &= node->mask.ip6[0];
		node->ip.ip6[1] &= node->mask.ip6[1];
		node->ip.ip6[2] &= node->mask.ip6[2];
		node->ip.ip6[3] &= node->mask.ip6[3];
	}
}

static __init inline bool
horrible_match_v4(const struct horrible_allowedips_node *node,
		  struct in_addr *ip)
{
	return (ip->s_addr & node->mask.ip) == node->ip.ip;
}

static __init inline bool
horrible_match_v6(const struct horrible_allowedips_node *node,
		  struct in6_addr *ip)
{
	return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
	       (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
	       (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
	       (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
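
/*
 * The list below is kept sorted by decreasing prefix length, so the linear
 * scans in the lookup helpers hit the longest matching prefix first, which
 * mirrors the longest-prefix-match semantics of the real trie.
 */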
static __init void
horrible_insert_ordered(struct horrible_allowedips *table,
			struct horrible_allowedips_node *node)
{
	struct horrible_allowedips_node *other = NULL, *where = NULL;
	u8 my_cidr = horrible_mask_to_cidr(node->mask);

	hlist_for_each_entry(other, &table->head, table) {
		if (other->ip_version == node->ip_version &&
		    !memcmp(&other->mask, &node->mask,
			    sizeof(union nf_inet_addr)) &&
		    !memcmp(&other->ip, &node->ip,
			    sizeof(union nf_inet_addr))) {
			other->value = node->value;
			kfree(node);
			return;
		}
	}
	hlist_for_each_entry(other, &table->head, table) {
		where = other;
		if (horrible_mask_to_cidr(other->mask) <= my_cidr)
			break;
	}
	if (!other && !where)
		hlist_add_head(&node->table, &table->head);
	else if (!other)
		hlist_add_behind(&node->table, &where->table);
	else
		hlist_add_before(&node->table, &where->table);
}

static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
			      struct in_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
							GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 4;
	node->value = value;
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}

static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
			      struct in6_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
							GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in6 = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 6;
	node->value = value;
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}

static __init void *
horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
			      struct in_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 4 && horrible_match_v4(node, ip))
			return node->value;
	}
	return NULL;
}

static __init void *
horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
			      struct in6_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 6 && horrible_match_v6(node, ip))
			return node->value;
	}
	return NULL;
}

static __init void
horrible_allowedips_remove_by_value(struct horrible_allowedips *table,
				    void *value)
{
	struct horrible_allowedips_node *node;
	struct hlist_node *h;

	hlist_for_each_entry_safe(node, h, &table->head, table) {
		if (node->value != value)
			continue;
		hlist_del(&node->table);
		kfree(node);
	}
}
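
/*
 * Differential test: insert the same randomized routes into both the real
 * allowedips trie and the linear-scan reference implementation above, then
 * check that a large number of random lookups agree between the two.
 */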
static __init bool randomized_test(void)
{
	unsigned int i, j, k, mutate_amount, cidr;
	u8 ip[16], mutate_mask[16], mutated[16];
	struct wg_peer **peers, *peer;
	struct horrible_allowedips h;
	DEFINE_MUTEX(mutex);
	struct allowedips t;
	bool ret = false;

	wg_allowedips_init(&t);
	horrible_allowedips_init(&h);

	peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
	if (unlikely(!peers)) {
		pr_err("allowedips random self-test malloc: FAIL\n");
		goto free;
	}
	for (i = 0; i < NUM_PEERS; ++i) {
		peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
		if (unlikely(!peers[i])) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free;
		}
		kref_init(&peers[i]->refcount);
		INIT_LIST_HEAD(&peers[i]->allowedips_list);
	}

	mutex_lock(&mutex);

	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
		get_random_bytes(ip, 4);
		cidr = get_random_u32_inclusive(1, 32);
		peer = peers[get_random_u32_below(NUM_PEERS)];
		if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
					    peer, &mutex) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
						  cidr, peer) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
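
		/* Derive mutated variants of the base route: keep a random
		 * number of leading bits and re-roll the rest. E.g. with
		 * mutate_amount == 11, mutate_mask ends up as ff e0 00 00,
		 * so the top 11 bits of the base address survive below.
		 */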
		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
			memcpy(mutated, ip, 4);
			get_random_bytes(mutate_mask, 4);
			mutate_amount = get_random_u32_below(32);
			for (k = 0; k < mutate_amount / 8; ++k)
				mutate_mask[k] = 0xff;
			mutate_mask[k] = 0xff
					 << ((8 - (mutate_amount % 8)) % 8);
			for (; k < 4; ++k)
				mutate_mask[k] = 0;
			for (k = 0; k < 4; ++k)
				mutated[k] = (mutated[k] & mutate_mask[k]) |
					     (~mutate_mask[k] &
					      get_random_u8());
			cidr = get_random_u32_inclusive(1, 32);
			peer = peers[get_random_u32_below(NUM_PEERS)];
			if (wg_allowedips_insert_v4(&t,
						    (struct in_addr *)mutated,
						    cidr, peer, &mutex) < 0) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
			if (horrible_allowedips_insert_v4(&h,
				(struct in_addr *)mutated, cidr, peer)) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
		}
	}

	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
		get_random_bytes(ip, 16);
		cidr = get_random_u32_inclusive(1, 128);
		peer = peers[get_random_u32_below(NUM_PEERS)];
		if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
					    peer, &mutex) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
		if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
						  cidr, peer) < 0) {
			pr_err("allowedips random self-test malloc: FAIL\n");
			goto free_locked;
		}
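
		/* Same mutation scheme as the v4 pass above; note that only
		 * the first four bytes of the 16-byte address get mutated,
		 * while the remaining twelve stay as in the base route.
		 */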
		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
			memcpy(mutated, ip, 16);
			get_random_bytes(mutate_mask, 16);
			mutate_amount = get_random_u32_below(128);
			for (k = 0; k < mutate_amount / 8; ++k)
				mutate_mask[k] = 0xff;
			mutate_mask[k] = 0xff
					 << ((8 - (mutate_amount % 8)) % 8);
			for (; k < 4; ++k)
				mutate_mask[k] = 0;
			for (k = 0; k < 4; ++k)
				mutated[k] = (mutated[k] & mutate_mask[k]) |
					     (~mutate_mask[k] &
					      get_random_u8());
			cidr = get_random_u32_inclusive(1, 128);
			peer = peers[get_random_u32_below(NUM_PEERS)];
			if (wg_allowedips_insert_v6(&t,
						    (struct in6_addr *)mutated,
						    cidr, peer, &mutex) < 0) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
			if (horrible_allowedips_insert_v6(
				    &h, (struct in6_addr *)mutated, cidr,
				    peer)) {
				pr_err("allowedips random self-test malloc: FAIL\n");
				goto free_locked;
			}
		}
	}

	mutex_unlock(&mutex);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}
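
	/* Query phase: random addresses must resolve identically in the trie
	 * and in the reference list. After each full round of queries, one
	 * more peer's routes are removed from both structures, so the
	 * removal paths receive the same differential coverage.
	 */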
	for (j = 0;; ++j) {
		for (i = 0; i < NUM_QUERIES; ++i) {
			get_random_bytes(ip, 4);
			if (lookup(t.root4, 32, ip) !=
			    horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
				pr_err("allowedips random v4 self-test: FAIL\n");
				goto free;
			}
			get_random_bytes(ip, 16);
			if (lookup(t.root6, 128, ip) !=
			    horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
				pr_err("allowedips random v6 self-test: FAIL\n");
				goto free;
			}
		}
		if (j >= NUM_PEERS)
			break;
		mutex_lock(&mutex);
		wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
		mutex_unlock(&mutex);
		horrible_allowedips_remove_by_value(&h, peers[j]);
	}

	if (t.root4 || t.root6) {
		pr_err("allowedips random self-test removal: FAIL\n");
		goto free;
	}

	ret = true;

free:
	mutex_lock(&mutex);
free_locked:
	wg_allowedips_free(&t, &mutex);
	mutex_unlock(&mutex);
	horrible_allowedips_free(&h);
	if (peers) {
		for (i = 0; i < NUM_PEERS; ++i)
			kfree(peers[i]);
	}
	kfree(peers);
	return ret;
}

static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
{
	static struct in_addr ip;
	u8 *split = (u8 *)&ip;

	split[0] = a;
	split[1] = b;
	split[2] = c;
	split[3] = d;
	return &ip;
}

static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
{
	static struct in6_addr ip;
	__be32 *split = (__be32 *)&ip;

	split[0] = cpu_to_be32(a);
	split[1] = cpu_to_be32(b);
	split[2] = cpu_to_be32(c);
	split[3] = cpu_to_be32(d);
	return &ip;
}

static __init struct wg_peer *init_peer(void)
{
	struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

	if (!peer)
		return NULL;
	kref_init(&peer->refcount);
	INIT_LIST_HEAD(&peer->allowedips_list);
	return peer;
}

#define insert(version, mem, ipa, ipb, ipc, ipd, cidr)                       \
	wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
					cidr, mem, &mutex)

#define maybe_fail() do {                                               \
		++i;                                                    \
		if (!_s) {                                              \
			pr_info("allowedips self-test %zu: FAIL\n", i); \
			success = false;                                \
		}                                                       \
	} while (0)

#define test(version, mem, ipa, ipb, ipc, ipd) do {                          \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) == (mem);  \
		maybe_fail();                                                \
	} while (0)

#define test_negative(version, mem, ipa, ipb, ipc, ipd) do {                 \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) != (mem);  \
		maybe_fail();                                                \
	} while (0)

#define test_boolean(cond) do {   \
		bool _s = (cond); \
		maybe_fail();     \
	} while (0)
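
/*
 * Roughly, test(4, a, 192, 168, 4, 20) looks up 192.168.4.20 in t.root4 and
 * checks that the resulting peer is "a", then invokes maybe_fail() to bump
 * the running test counter i and log a numbered failure on mismatch. The
 * macros lean on the local variables t, mutex, i, and success.
 */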

bool __init wg_allowedips_selftest(void)
{
	bool found_a = false, found_b = false, found_c = false, found_d = false,
	     found_e = false, found_other = false;
	struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
		       *d = init_peer(), *e = init_peer(), *f = init_peer(),
		       *g = init_peer(), *h = init_peer();
	struct allowedips_node *iter_node;
	bool success = false;
	struct allowedips t;
	struct in6_addr ip;
	DEFINE_MUTEX(mutex);
	size_t i = 0, count = 0;
	__be64 part;

	mutex_lock(&mutex);
	wg_allowedips_init(&t);

	if (!a || !b || !c || !d || !e || !f || !g || !h) {
		pr_err("allowedips self-test malloc: FAIL\n");
		goto free;
	}

	insert(4, a, 192, 168, 4, 0, 24);
	insert(4, b, 192, 168, 4, 4, 32);
	insert(4, c, 192, 168, 0, 0, 16);
	insert(4, d, 192, 95, 5, 64, 27);
	/* replaces previous entry, and maskself is required */
	insert(4, c, 192, 95, 5, 65, 27);
	insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
	insert(4, e, 0, 0, 0, 0, 0);
	insert(6, e, 0, 0, 0, 0, 0);
	/* replaces previous entry */
	insert(6, f, 0, 0, 0, 0, 0);
	insert(6, g, 0x24046800, 0, 0, 0, 32);
	/* maskself is required */
	insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
	insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
	insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
	insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
	insert(4, g, 64, 15, 112, 0, 20);
	/* maskself is required */
	insert(4, h, 64, 15, 123, 211, 25);
	insert(4, a, 10, 0, 0, 0, 25);
	insert(4, b, 10, 0, 0, 128, 25);
	insert(4, a, 10, 1, 0, 0, 30);
	insert(4, b, 10, 1, 0, 4, 30);
	insert(4, c, 10, 1, 0, 8, 29);
	insert(4, d, 10, 1, 0, 16, 29);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}

	success = true;

	test(4, a, 192, 168, 4, 20);
	test(4, a, 192, 168, 4, 0);
	test(4, b, 192, 168, 4, 4);
	test(4, c, 192, 168, 200, 182);
	test(4, c, 192, 95, 5, 68);
	test(4, e, 192, 95, 5, 96);
	test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
	test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
	test(6, f, 0x26075300, 0x60006b01, 0, 0);
	test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
	test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0, 0);
	test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
	test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
	test(4, g, 64, 15, 116, 26);
	test(4, g, 64, 15, 127, 3);
	test(4, g, 64, 15, 123, 1);
	test(4, h, 64, 15, 123, 128);
	test(4, h, 64, 15, 123, 129);
	test(4, a, 10, 0, 0, 52);
	test(4, b, 10, 0, 0, 220);
	test(4, a, 10, 1, 0, 2);
	test(4, b, 10, 1, 0, 6);
	test(4, c, 10, 1, 0, 10);
	test(4, d, 10, 1, 0, 20);

	insert(4, a, 1, 0, 0, 0, 32);
	insert(4, a, 64, 0, 0, 0, 32);
	insert(4, a, 128, 0, 0, 0, 32);
	insert(4, a, 192, 0, 0, 0, 32);
	insert(4, a, 255, 0, 0, 0, 32);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 1, 0, 0, 0);
	test_negative(4, a, 64, 0, 0, 0);
	test_negative(4, a, 128, 0, 0, 0);
	test_negative(4, a, 192, 0, 0, 0);
	test_negative(4, a, 255, 0, 0, 0);

	wg_allowedips_free(&t, &mutex);
	wg_allowedips_init(&t);
	insert(4, a, 192, 168, 0, 0, 16);
	insert(4, a, 192, 168, 0, 0, 24);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 192, 168, 0, 1);

	/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
	 * if something goes wrong.
	 */
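	/* For i == 3, for instance, the loop below inserts
	 * ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff8/128 and then
	 * ffff:ffff:ffff:fff8:0:0:0:0/128; taken together, the 129 routes
	 * share successively longer common prefixes, building a spine of
	 * intermediate nodes near the maximum depth.
	 */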
	for (i = 0; i < 64; ++i) {
		part = cpu_to_be64(~0LLU << i);
		memset(&ip, 0xff, 8);
		memcpy((u8 *)&ip + 8, &part, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
		memcpy(&ip, &part, 8);
		memset((u8 *)&ip + 8, 0, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	}
	memset(&ip, 0, 16);
	wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	wg_allowedips_free(&t, &mutex);

	wg_allowedips_init(&t);
	insert(4, a, 192, 95, 5, 93, 27);
	insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(4, a, 10, 1, 0, 20, 29);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
	list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
		u8 cidr, ip[16] __aligned(__alignof(u64));
		int family = wg_allowedips_read_node(iter_node, ip, &cidr);

		count++;
		if (cidr == 27 && family == AF_INET &&
		    !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
			found_a = true;
		else if (cidr == 128 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
				 sizeof(struct in6_addr)))
			found_b = true;
		else if (cidr == 29 && family == AF_INET &&
			 !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
			found_c = true;
		else if (cidr == 83 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
				 sizeof(struct in6_addr)))
			found_d = true;
		else if (cidr == 21 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075000, 0, 0, 0),
				 sizeof(struct in6_addr)))
			found_e = true;
		else
			found_other = true;
	}
== 5);
645 test_boolean(found_a
);
646 test_boolean(found_b
);
647 test_boolean(found_c
);
648 test_boolean(found_d
);
649 test_boolean(found_e
);
650 test_boolean(!found_other
);

	if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
		success = randomized_test();

	if (success)
		pr_info("allowedips self-tests: pass\n");

free:
	wg_allowedips_free(&t, &mutex);
	kfree(a);
	kfree(b);
	kfree(c);
	kfree(d);
	kfree(e);
	kfree(f);
	kfree(g);
	kfree(h);
	mutex_unlock(&mutex);

	return success;
}

#undef test_boolean
#undef test_negative
#undef test
#undef maybe_fail
#undef insert

#endif