// SPDX-License-Identifier: GPL-2.0-only
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5
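
/* Connections are tracked per key: a jhash of the caller-provided key
 * selects one of CONNCOUNT_SLOTS rbtree roots, the rbtree is keyed by
 * the raw key, and each tree node carries the list of saved tuples for
 * that key.  Counting a key is thus a tree descent plus a list length
 * read.
 */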

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	lockdep_assert_held(&list->list_lock);

	list->count--;
	list_del(&conn->node);

	kmem_cache_free(conncount_conn_cachep, conn);
}
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	u32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}
static int __nf_conncount_add(struct net *net,
			      struct nf_conncount_list *list,
			      const struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_zone *zone)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					return 0; /* already exists */
			} else {
				collect++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			nf_ct_put(found_ct);
			return 0;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}

	if (WARN_ON_ONCE(list->count > INT_MAX))
		return -EOVERFLOW;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return -ENOMEM;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;
	return 0;
}
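
/* Locked wrapper around __nf_conncount_add() for external callers. */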
int nf_conncount_add(struct net *net,
		     struct nf_conncount_list *list,
		     const struct nf_conntrack_tuple *tuple,
		     const struct nf_conntrack_zone *zone)
{
	int ret;

	/* check the saved connections */
	spin_lock_bh(&list->list_lock);
	ret = __nf_conncount_add(net, list, tuple, zone);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty. Must be called with BH disabled. */
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool ret = false;

	/* don't bother if other cpu is already doing GC */
	if (!spin_trylock(&list->list_lock))
		return false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)
				collected++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			break;
	}

	if (!list->count)
		ret = true;
	spin_unlock(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (!rbconn->list.count) {
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}
static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	u8 keylen = data->keylen;
	bool do_gc = true;

	spin_lock_bh(&nf_conncount_locks[hash]);
restart:
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			int ret;

			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
			if (ret)
				count = 0; /* hotdrop */
			else
				count = rbconn->list.count;
			tree_nodes_free(root, gc_nodes, gc_count);
			goto out_unlock;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		schedule_gc_worker(data, hash);
		gc_count = 0;
		do_gc = false;
		goto restart;
	}

	/* expected case: match, insert new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;
	rbconn->list.count = count;

	rb_link_node_rcu(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash]);
	return count;
}
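
/* Fast path: lockless rbtree descent under RCU.  Falls back to the
 * locked insert_tree() slow path only when the key is absent or the
 * matching node is empty and about to be freed.
 */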
static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;
	u8 keylen = data->keylen;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			int ret;

			if (!tuple) {
				nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}

			spin_lock_bh(&rbconn->list.list_lock);
			/* Node might be about to be free'd.
			 * We need to defer to insert_tree() in this case.
			 */
			if (rbconn->list.count == 0) {
				spin_unlock_bh(&rbconn->list.list_lock);
				break;
			}

			/* same source network -> be counted! */
			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
			spin_unlock_bh(&rbconn->list.list_lock);
			if (ret)
				return 0; /* hotdrop */
			else
				return rbconn->list.count;
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, tuple, zone);
}
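
/* Deferred garbage collection: gc every list of one pending tree, then
 * take the slot lock and free the nodes whose lists became empty.
 * Reschedules itself as long as other trees have their pending bit set.
 */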
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	local_bh_disable();
	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_count++;
	}
	rcu_read_unlock();
	local_bh_enable();

	cond_resched();

	spin_lock_bh(&nf_conncount_locks[tree]);
	if (gc_count < ARRAY_SIZE(gc_nodes))
		goto next; /* do not bother */

	gc_count = 0;
	node = rb_first(root);
	while (node != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		node = rb_next(node);

		if (rbconn->list.count > 0)
			continue;

		gc_nodes[gc_count++] = rbconn;
		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
			tree_nodes_free(root, gc_nodes, gc_count);
			gc_count = 0;
		}
	}

	tree_nodes_free(root, gc_nodes, gc_count);
next:
	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_gc_worker(data, next_tree);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return number of conntrack entries in 'net' with particular 'key'.
 * If 'tuple' is not null, insert it into the accounting data structure.
 * Call with RCU read lock.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);

struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");