/*
 * Lightweight Autonomic Network Architecture
 *
 * Original userspace code from D. J. Bernstein (http://cr.yp.to/critbit.html).
 * Added a critbit_get method: instead of copying strings into the nodes (as
 * in the original version), we now hold a reference to the string and fetch
 * the containing structure on lookups. By doing this, we only need to
 * guarantee that the string is aligned on a power-of-two boundary (see the
 * usage sketch at the end of this file). Added RCU and kmem_cache-aligned
 * node allocation support.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 *
 * Compared to a hash table, a crit-bit tree has comparable speed and two big
 * advantages. The first advantage is that a crit-bit tree supports more fast
 * operations: finding the smallest string, for example. The second advantage
 * is that a crit-bit tree guarantees good performance: it doesn't have any
 * tricky slowdowns for unusual (or malicious) data.
 *
 * Crit-bit trees are faster than comparison-based structures such as AVL
 * trees and B-trees. They're also simpler, especially for variable-length
 * strings.
 *
 * Crit-bit trees have the disadvantage of not (yet!) being widely
 * appreciated. Very few textbooks explain them, and very few libraries
 * implement them.
 *   (D. J. Bernstein, http://cr.yp.to/critbit.html)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>	/* strlen(), strcmp() */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

#include "xt_critbit.h"
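/*
 * Note on pointer tagging (added for clarity): internal nodes are stored
 * with the lowest pointer bit set, external strings with it clear. Lookups
 * therefore test (1 & (intptr_t) p) to tell the two apart, which is why
 * stored strings must never sit on an odd address; this module enforces the
 * stronger SMP_CACHE_BYTES alignment on insert.
 */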
typedef long intptr_t;

struct critbit_node {
	void *child[2];		/* tagged pointers: node (LSB set) or string */
	struct rcu_head rcu;	/* deferred node freeing via call_rcu() */
	u32 byte;		/* index of the byte holding the crit bit */
	u8 otherbits;		/* mask of all bits except the crit bit */
} ____cacheline_aligned;
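/*
 * Worked example for the direction computation used throughout this file:
 * otherbits holds the complement mask of the critical bit. For crit bit
 * 0x04, otherbits = 0xfb. If the key byte c has that bit set, e.g. c = 0x06,
 * then (1 + (0xfb | 0x06)) >> 8 = (1 + 0xff) >> 8 = 1, so we descend to
 * child[1]; if it is clear, e.g. c = 0x01, then
 * (1 + (0xfb | 0x01)) >> 8 = (1 + 0xfb) >> 8 = 0, and we descend to child[0].
 */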
struct critbit_node_cache {
	struct kmem_cache *cache;
	atomic_t refcnt;
};

static struct critbit_node_cache critbit_cache = { 0 };
static void critbit_ctor(void *obj)
{
	struct critbit_node *node = obj;

	node->child[0] = node->child[1] = NULL;
}
static inline struct critbit_node *critbit_alloc_node_aligned(gfp_t flags)
{
	struct critbit_node *cn;

	if (likely(critbit_cache.cache)) {
		cn = kmem_cache_alloc(critbit_cache.cache, flags);
		if (cn)
			__module_get(THIS_MODULE);
	} else {
		/* Fallback if the kmem_cache has not been set up yet */
		cn = kmalloc(sizeof(*cn), flags);
		if (cn)
			__module_get(THIS_MODULE);
	}

	return cn;
}
static inline void critbit_free_node(struct critbit_node *p)
{
	if (likely(critbit_cache.cache))
		kmem_cache_free(critbit_cache.cache, p);
	else
		kfree(p);

	module_put(THIS_MODULE);
}
int __critbit_contains(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p;
	struct critbit_node *q;
	int direction;

	if (unlikely(!rcu_read_lock_held())) {
		printk(KERN_ERR "No rcu_read_lock held!\n");
	}

	p = rcu_dereference_raw(tree->root);
	if (!p)
		return 0;

	/* Walk down the tree until we hit an external string */
	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}

	return (0 == strcmp(elem, (char *) p));
}
EXPORT_SYMBOL(__critbit_contains);
int critbit_contains(struct critbit_tree *tree, const char *elem)
{
	int ret;

	rcu_read_lock();
	ret = __critbit_contains(tree, elem);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(critbit_contains);
char *__critbit_get(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p;
	struct critbit_node *q;
	int direction;

	if (unlikely(!rcu_read_lock_held())) {
		printk(KERN_ERR "No rcu_read_lock held!\n");
	}

	p = rcu_dereference_raw(tree->root);
	if (!p)
		return NULL;

	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}

	/* Return the stored reference itself, so that the caller can
	 * fetch the container structure of the string. */
	return (0 == strcmp(elem, (char *) p)) ? (char *) p : NULL;
}
EXPORT_SYMBOL(__critbit_get);
char *critbit_get(struct critbit_tree *tree, const char *elem)
{
	char *ret;

	rcu_read_lock();
	ret = __critbit_get(tree, elem);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(critbit_get);
int __critbit_insert(struct critbit_tree *tree, char *elem)
{
	const u8 *const ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p = rcu_dereference_raw(tree->root);
	u32 newbyte, newotherbits;
	struct critbit_node *q, *newnode;
	int direction, newdirection;
	void **wherep;

	if (unlikely(!IS_ALIGNED((unsigned long) elem, SMP_CACHE_BYTES))) {
		printk(KERN_ERR "[lana] Your string is not power "
		       "of two aligned!\n");
		return -EINVAL;
	}

	if (!p) {
		/* Empty tree: the string itself becomes the root */
		rcu_assign_pointer(tree->root, elem);
		return 0;
	}

	/* Find the closest existing string ... */
	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}

	/* ... and the first byte where the two strings differ */
	for (newbyte = 0; newbyte < ulen; ++newbyte) {
		if (p[newbyte] != ubytes[newbyte]) {
			newotherbits = p[newbyte] ^ ubytes[newbyte];
			goto different_byte_found;
		}
	}

	if (p[newbyte] != 0) {
		newotherbits = p[newbyte];
		goto different_byte_found;
	}

	return -EEXIST;

different_byte_found:
	/* Reduce newotherbits to the most significant differing bit,
	 * then flip it into the complement mask form used by lookups */
	while (newotherbits & (newotherbits - 1))
		newotherbits &= newotherbits - 1;
	newotherbits ^= 255;
	c = p[newbyte];
	newdirection = (1 + (newotherbits | c)) >> 8;

	newnode = critbit_alloc_node_aligned(GFP_ATOMIC | __GFP_ZERO);
	if (!newnode)
		return -ENOMEM;

	newnode->byte = newbyte;
	newnode->otherbits = newotherbits;
	newnode->child[1 - newdirection] = elem;

	/* Find the correct insertion point */
	for (wherep = &tree->root;;) {
		p = *wherep;
		if (!(1 & (intptr_t) p))
			break;
		q = (void *) (p - 1);
		if (q->byte > newbyte)
			break;
		if (q->byte == newbyte && q->otherbits > newotherbits)
			break;
		c = 0;
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		wherep = q->child + direction;
	}

	newnode->child[newdirection] = *wherep;
	/* Publish the new node only after it is fully initialized */
	rcu_assign_pointer(*wherep, (void *) (1 + (char *) newnode));

	return 0;
}
EXPORT_SYMBOL(__critbit_insert);
int critbit_insert(struct critbit_tree *tree, char *elem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&tree->wr_lock, flags);
	ret = __critbit_insert(tree, elem);
	spin_unlock_irqrestore(&tree->wr_lock, flags);

	return ret;
}
EXPORT_SYMBOL(critbit_insert);
static void critbit_do_free_rcu(struct rcu_head *rp)
{
	struct critbit_node *p = container_of(rp, struct critbit_node, rcu);

	critbit_free_node(p);
}
int __critbit_delete(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p = rcu_dereference_raw(tree->root);
	void **wherep = &tree->root;
	void **whereq = NULL;
	struct critbit_node *q = NULL;
	int direction = 0;

	if (!p)
		return -ENOENT;

	/* Walk down, remembering the grandparent link in whereq */
	while (1 & (intptr_t) p) {
		whereq = wherep;
		q = (void *) (p - 1);
		c = 0;
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		wherep = q->child + direction;
		p = *wherep;
	}

	if (0 != strcmp(elem, (char *) p))
		return -ENOENT;

	/* Here, we could decrement a refcount to the elem. */
	if (!whereq) {
		/* The string was the root; the tree becomes empty */
		rcu_assign_pointer(tree->root, NULL);
		return 0;
	}

	/* Splice out the parent node and free it after a grace period */
	rcu_assign_pointer(*whereq, q->child[1 - direction]);
	call_rcu(&q->rcu, critbit_do_free_rcu);

	return 0;
}
EXPORT_SYMBOL(__critbit_delete);
int critbit_delete(struct critbit_tree *tree, const char *elem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&tree->wr_lock, flags);
	ret = __critbit_delete(tree, elem);
	spin_unlock_irqrestore(&tree->wr_lock, flags);

	return ret;
}
EXPORT_SYMBOL(critbit_delete);
static int critbit_node_cache_init(void)
{
	atomic_set(&critbit_cache.refcnt, 1);
	critbit_cache.cache = kmem_cache_create("critbit",
						sizeof(struct critbit_node),
						0, SLAB_HWCACHE_ALIGN |
						SLAB_RECLAIM_ACCOUNT,
						critbit_ctor);
	if (!critbit_cache.cache)
		return -ENOMEM;

	return 0;
}
static void critbit_node_cache_destroy(void)
{
	kmem_cache_destroy(critbit_cache.cache);
}
void get_critbit_cache(void)
{
	if (unlikely(!atomic_read(&critbit_cache.refcnt))) {
		if (critbit_node_cache_init())
			panic("No mem left for critbit cache!\n");
	} else {
		atomic_inc(&critbit_cache.refcnt);
	}
}
EXPORT_SYMBOL(get_critbit_cache);
void put_critbit_cache(void)
{
	if (likely(!atomic_dec_and_test(&critbit_cache.refcnt)))
		return;

	critbit_node_cache_destroy();
}
EXPORT_SYMBOL(put_critbit_cache);