// SPDX-License-Identifier: GPL-2.0-only
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *             only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS         256U

#define CONNCOUNT_GC_MAX_NODES  8
#define MAX_KEYLEN              5

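/* Data layout, summarizing the code below: each nf_conncount_data
 * instance hashes keys into CONNCOUNT_SLOTS rbtree slots; every tree
 * node (nf_conncount_rb) carries the list of tracked connections that
 * share one key.  Stale list entries are reaped opportunistically on
 * the add path; empty tree nodes are freed by the deferred
 * tree_gc_worker().
 */
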
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
        struct list_head node;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_zone zone;
        int cpu;
        u32 jiffies32;
};

struct nf_conncount_rb {
        struct rb_node node;
        struct nf_conncount_list list;
        u32 key[MAX_KEYLEN];
        struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
        unsigned int keylen;
        struct rb_root root[CONNCOUNT_SLOTS];
        struct net *net;
        struct work_struct gc_work;
        unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
        unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
        if (nf_ct_protonum(conn) == IPPROTO_TCP)
                return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
                       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
        else
                return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
        return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
                      struct nf_conncount_tuple *conn)
{
        lockdep_assert_held(&list->list_lock);

        list->count--;
        list_del(&conn->node);

        kmem_cache_free(conncount_conn_cachep, conn);
}

static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
              struct nf_conncount_tuple *conn)
{
        const struct nf_conntrack_tuple_hash *found;
        unsigned long a, b;
        int cpu = raw_smp_processor_id();
        u32 age;

        found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
        if (found)
                return found;
        b = conn->jiffies32;
        a = (u32)jiffies;

        /* conn might have been added just before by another cpu and
         * might still be unconfirmed.  In this case, nf_conntrack_find()
         * returns no result.  Thus only evict if this cpu added the
         * stale entry or if the entry is older than two jiffies.
         */
        age = a - b;
        if (conn->cpu == cpu || age >= 2) {
                conn_free(list, conn);
                return ERR_PTR(-ENOENT);
        }

        return ERR_PTR(-EAGAIN);
}

static int __nf_conncount_add(struct net *net,
                              struct nf_conncount_list *list,
                              const struct nf_conntrack_tuple *tuple,
                              const struct nf_conntrack_zone *zone)
{
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn, *conn_n;
        struct nf_conn *found_ct;
        unsigned int collect = 0;

        if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
                goto add_new_node;

        /* check the saved connections */
        list_for_each_entry_safe(conn, conn_n, &list->head, node) {
                if (collect > CONNCOUNT_GC_MAX_NODES)
                        break;

                found = find_or_evict(net, list, conn);
                if (IS_ERR(found)) {
                        /* Not found, but might be about to be confirmed */
                        if (PTR_ERR(found) == -EAGAIN) {
                                if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
                                    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
                                    nf_ct_zone_id(zone, zone->dir))
                                        return 0; /* already exists */
                        } else {
                                collect++;
                        }
                        continue;
                }

                found_ct = nf_ct_tuplehash_to_ctrack(found);

                if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
                    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
                        /*
                         * We should not see tuples twice unless someone hooks
                         * this into a table without "-p tcp --syn".
                         *
                         * Attempt to avoid a re-add in this case.
                         */
                        nf_ct_put(found_ct);
                        return 0;
                } else if (already_closed(found_ct)) {
                        /*
                         * we do not care about connections which are
                         * closed already -> ditch it
                         */
                        nf_ct_put(found_ct);
                        conn_free(list, conn);
                        collect++;
                        continue;
                }

                nf_ct_put(found_ct);
        }

add_new_node:
        if (WARN_ON_ONCE(list->count > INT_MAX))
                return -EOVERFLOW;

        conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
        if (conn == NULL)
                return -ENOMEM;

        conn->tuple = *tuple;
        conn->zone = *zone;
        conn->cpu = raw_smp_processor_id();
        conn->jiffies32 = (u32)jiffies;
        list_add_tail(&conn->node, &list->head);
        list->count++;
        list->last_gc = (u32)jiffies;
        return 0;
}

int nf_conncount_add(struct net *net,
                     struct nf_conncount_list *list,
                     const struct nf_conntrack_tuple *tuple,
                     const struct nf_conntrack_zone *zone)
{
        int ret;

        /* check the saved connections */
        spin_lock_bh(&list->list_lock);
        ret = __nf_conncount_add(net, list, tuple, zone);
        spin_unlock_bh(&list->list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
        spin_lock_init(&list->list_lock);
        INIT_LIST_HEAD(&list->head);
        list->count = 0;
        list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty. Must be called with BH disabled. */
bool nf_conncount_gc_list(struct net *net,
                          struct nf_conncount_list *list)
{
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn, *conn_n;
        struct nf_conn *found_ct;
        unsigned int collected = 0;
        bool ret = false;

        /* don't bother if we just did GC */
        if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
                return false;

        /* don't bother if other cpu is already doing GC */
        if (!spin_trylock(&list->list_lock))
                return false;

        list_for_each_entry_safe(conn, conn_n, &list->head, node) {
                found = find_or_evict(net, list, conn);
                if (IS_ERR(found)) {
                        if (PTR_ERR(found) == -ENOENT)
                                collected++;
                        continue;
                }

                found_ct = nf_ct_tuplehash_to_ctrack(found);
                if (already_closed(found_ct)) {
                        /*
                         * we do not care about connections which are
                         * closed already -> ditch it
                         */
                        nf_ct_put(found_ct);
                        conn_free(list, conn);
                        collected++;
                        continue;
                }

                nf_ct_put(found_ct);
                if (collected > CONNCOUNT_GC_MAX_NODES)
                        break;
        }

        if (!list->count)
                ret = true;
        list->last_gc = (u32)jiffies;
        spin_unlock(&list->list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
        struct nf_conncount_rb *rbconn;

        rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
        kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
                            struct nf_conncount_rb *gc_nodes[],
                            unsigned int gc_count)
{
        struct nf_conncount_rb *rbconn;

        while (gc_count) {
                rbconn = gc_nodes[--gc_count];
                spin_lock(&rbconn->list.list_lock);
                if (!rbconn->list.count) {
                        rb_erase(&rbconn->node, root);
                        call_rcu(&rbconn->rcu_head, __tree_nodes_free);
                }
                spin_unlock(&rbconn->list.list_lock);
        }
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
        set_bit(tree, data->pending_trees);
        schedule_work(&data->gc_work);
}

static unsigned int
insert_tree(struct net *net,
            struct nf_conncount_data *data,
            struct rb_root *root,
            unsigned int hash,
            const u32 *key,
            const struct nf_conntrack_tuple *tuple,
            const struct nf_conntrack_zone *zone)
{
        struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
        struct rb_node **rbnode, *parent;
        struct nf_conncount_rb *rbconn;
        struct nf_conncount_tuple *conn;
        unsigned int count = 0, gc_count = 0;
        bool do_gc = true;

        spin_lock_bh(&nf_conncount_locks[hash]);
restart:
        parent = NULL;
        rbnode = &(root->rb_node);
        while (*rbnode) {
                int diff;

                rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

                parent = *rbnode;
                diff = key_diff(key, rbconn->key, data->keylen);
                if (diff < 0) {
                        rbnode = &((*rbnode)->rb_left);
                } else if (diff > 0) {
                        rbnode = &((*rbnode)->rb_right);
                } else {
                        int ret;

                        ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
                        if (ret)
                                count = 0; /* hotdrop */
                        else
                                count = rbconn->list.count;
                        tree_nodes_free(root, gc_nodes, gc_count);
                        goto out_unlock;
                }

                if (gc_count >= ARRAY_SIZE(gc_nodes))
                        continue;

                if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
                        gc_nodes[gc_count++] = rbconn;
        }

        if (gc_count) {
                tree_nodes_free(root, gc_nodes, gc_count);
                schedule_gc_worker(data, hash);
                gc_count = 0;
                do_gc = false;
                goto restart;
        }

        /* expected case: no match, insert new node */
        rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
        if (rbconn == NULL)
                goto out_unlock;

        conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
        if (conn == NULL) {
                kmem_cache_free(conncount_rb_cachep, rbconn);
                goto out_unlock;
        }

        conn->tuple = *tuple;
        conn->zone = *zone;
        /* cpu and jiffies32 are read by find_or_evict(), so they must be
         * initialized here as well
         */
        conn->cpu = raw_smp_processor_id();
        conn->jiffies32 = (u32)jiffies;
        memcpy(rbconn->key, key, sizeof(u32) * data->keylen);

        nf_conncount_list_init(&rbconn->list);
        list_add(&conn->node, &rbconn->list.head);
        count = 1;
        rbconn->list.count = count;

        rb_link_node_rcu(&rbconn->node, parent, rbnode);
        rb_insert_color(&rbconn->node, root);
out_unlock:
        spin_unlock_bh(&nf_conncount_locks[hash]);
        return count;
}

static unsigned int
count_tree(struct net *net,
           struct nf_conncount_data *data,
           const u32 *key,
           const struct nf_conntrack_tuple *tuple,
           const struct nf_conntrack_zone *zone)
{
        struct rb_root *root;
        struct rb_node *parent;
        struct nf_conncount_rb *rbconn;
        unsigned int hash;

        hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
        root = &data->root[hash];

        parent = rcu_dereference_raw(root->rb_node);
        while (parent) {
                int diff;

                rbconn = rb_entry(parent, struct nf_conncount_rb, node);

                diff = key_diff(key, rbconn->key, data->keylen);
                if (diff < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                } else if (diff > 0) {
                        parent = rcu_dereference_raw(parent->rb_right);
                } else {
                        int ret;

                        if (!tuple) {
                                nf_conncount_gc_list(net, &rbconn->list);
                                return rbconn->list.count;
                        }

                        spin_lock_bh(&rbconn->list.list_lock);
                        /* Node might be about to be free'd.
                         * We need to defer to insert_tree() in this case.
                         */
                        if (rbconn->list.count == 0) {
                                spin_unlock_bh(&rbconn->list.list_lock);
                                break;
                        }

                        /* same source network -> be counted! */
                        ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
                        spin_unlock_bh(&rbconn->list.list_lock);
                        if (ret)
                                return 0; /* hotdrop */
                        else
                                return rbconn->list.count;
                }
        }

        if (!tuple)
                return 0;

        return insert_tree(net, data, root, hash, key, tuple, zone);
}

static void tree_gc_worker(struct work_struct *work)
{
        struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
        struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
        struct rb_root *root;
        struct rb_node *node;
        unsigned int tree, next_tree, gc_count = 0;

        tree = data->gc_tree % CONNCOUNT_SLOTS;
        root = &data->root[tree];

        local_bh_disable();
        rcu_read_lock();
        for (node = rb_first(root); node != NULL; node = rb_next(node)) {
                rbconn = rb_entry(node, struct nf_conncount_rb, node);
                if (nf_conncount_gc_list(data->net, &rbconn->list))
                        gc_count++;
        }
        rcu_read_unlock();
        local_bh_enable();

        cond_resched();

        spin_lock_bh(&nf_conncount_locks[tree]);
        if (gc_count < ARRAY_SIZE(gc_nodes))
                goto next; /* do not bother */

        gc_count = 0;
        node = rb_first(root);
        while (node != NULL) {
                rbconn = rb_entry(node, struct nf_conncount_rb, node);
                node = rb_next(node);

                if (rbconn->list.count > 0)
                        continue;

                gc_nodes[gc_count++] = rbconn;
                if (gc_count >= ARRAY_SIZE(gc_nodes)) {
                        tree_nodes_free(root, gc_nodes, gc_count);
                        gc_count = 0;
                }
        }

        tree_nodes_free(root, gc_nodes, gc_count);
next:
        clear_bit(tree, data->pending_trees);

        next_tree = (tree + 1) % CONNCOUNT_SLOTS;
        next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

        if (next_tree < CONNCOUNT_SLOTS) {
                data->gc_tree = next_tree;
                schedule_work(work);
        }

        spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return number of conntrack entries in 'net' with particular 'key'.
 * If 'tuple' is not null, insert it into the accounting data structure.
 * Call with RCU read lock.
 */
unsigned int nf_conncount_count(struct net *net,
                                struct nf_conncount_data *data,
                                const u32 *key,
                                const struct nf_conntrack_tuple *tuple,
                                const struct nf_conntrack_zone *zone)
{
        return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);

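/* Usage sketch (illustrative addition, not part of the original file):
 * with a non-NULL tuple, nf_conncount_count() also records the
 * connection; with a NULL tuple it only garbage-collects and reports
 * the current count for 'key' (nothing is inserted on that path, so
 * the zone is not consulted either).  The caller must hold the RCU
 * read lock.  The helper name below is hypothetical.
 */
static unsigned int __maybe_unused
example_peek_count(struct net *net, struct nf_conncount_data *data,
                   const u32 *key)
{
        /* query-only: no tuple, no zone */
        return nf_conncount_count(net, data, key, NULL, NULL);
}
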
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen)
{
        struct nf_conncount_data *data;
        int i;

        if (keylen % sizeof(u32) ||
            keylen / sizeof(u32) > MAX_KEYLEN ||
            keylen == 0)
                return ERR_PTR(-EINVAL);

        net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < ARRAY_SIZE(data->root); ++i)
                data->root[i] = RB_ROOT;

        data->keylen = keylen / sizeof(u32);
        data->net = net;
        INIT_WORK(&data->gc_work, tree_gc_worker);

        return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

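/* Initialization sketch (illustrative addition, not part of the
 * original file): keylen is passed in bytes and must be a non-zero
 * multiple of sizeof(u32), at most MAX_KEYLEN * sizeof(u32) bytes.
 * A hypothetical user keying on a single IPv4 address could do:
 */
static struct nf_conncount_data * __maybe_unused
example_init_ipv4_key(struct net *net)
{
        struct nf_conncount_data *data;

        data = nf_conncount_init(net, sizeof(u32)); /* one u32 of key */
        if (IS_ERR(data))
                return NULL; /* -EINVAL (bad keylen) or -ENOMEM */

        /* pair with nf_conncount_destroy(net, data) on teardown */
        return data;
}
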
void nf_conncount_cache_free(struct nf_conncount_list *list)
{
        struct nf_conncount_tuple *conn, *conn_n;

        list_for_each_entry_safe(conn, conn_n, &list->head, node)
                kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
        struct nf_conncount_rb *rbconn;
        struct rb_node *node;

        while ((node = rb_first(r)) != NULL) {
                rbconn = rb_entry(node, struct nf_conncount_rb, node);

                rb_erase(node, r);

                nf_conncount_cache_free(&rbconn->list);

                kmem_cache_free(conncount_rb_cachep, rbconn);
        }
}

void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data)
{
        unsigned int i;

        cancel_work_sync(&data->gc_work);

        for (i = 0; i < ARRAY_SIZE(data->root); ++i)
                destroy_tree(&data->root[i]);

        kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
        int i;

        for (i = 0; i < CONNCOUNT_SLOTS; ++i)
                spin_lock_init(&nf_conncount_locks[i]);

        conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
        if (!conncount_conn_cachep)
                return -ENOMEM;

        conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
        if (!conncount_rb_cachep) {
                kmem_cache_destroy(conncount_conn_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void __exit nf_conncount_modexit(void)
{
        kmem_cache_destroy(conncount_conn_cachep);
        kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");