/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * based on ...
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNLIMIT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS	8U
#else
#define CONNLIMIT_LOCK_SLOTS	256U
#endif

#define CONNLIMIT_GC_MAX_NODES	8
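/*
 * Note on locking granularity (reconstruction note, not in the original
 * source): addresses hash into CONNLIMIT_SLOTS rb-tree roots, but only
 * CONNLIMIT_LOCK_SLOTS spinlocks guard them -- the lock index is
 * hash % CONNLIMIT_LOCK_SLOTS, so each lock covers
 * CONNLIMIT_SLOTS / CONNLIMIT_LOCK_SLOTS buckets.  The smaller lock
 * count under CONFIG_LOCKDEP presumably keeps lockdep's per-lock
 * bookkeeping overhead down.
 */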
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
	union nf_inet_addr		addr;
};
struct xt_connlimit_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	union nf_inet_addr addr; /* search key */
};
static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
struct xt_connlimit_data {
	struct rb_root climit_root4[CONNLIMIT_SLOTS];
	struct rb_root climit_root6[CONNLIMIT_SLOTS];
};
static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;
static inline unsigned int connlimit_iphash(__be32 addr)
{
	return jhash_1word((__force __u32)addr,
			    connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
		  const union nf_inet_addr *mask)
{
	union nf_inet_addr res;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
		res.ip6[i] = addr->ip6[i] & mask->ip6[i];

	return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
		       connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return 0;
}
static int
same_source_net(const union nf_inet_addr *addr,
		const union nf_inet_addr *mask,
		const union nf_inet_addr *u3, u_int8_t family)
{
	if (family == NFPROTO_IPV4) {
		return ntohl(addr->ip & mask->ip) -
		       ntohl(u3->ip & mask->ip);
	} else {
		union nf_inet_addr lh, rh;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
			lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
			rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
		}

		return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
	}
}
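/*
 * (Reconstruction note, not in the original source.)  The signed result
 * above is used purely as a three-way comparator: count_tree() descends
 * left on negative, right on positive, and treats zero as "same masked
 * source network".  For IPv6, memcmp() over the masked address bytes
 * yields the same ordering.
 */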
static bool add_hlist(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr)
{
	struct xt_connlimit_conn *conn;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	conn->addr = *addr;
	hlist_add_head(&conn->node, head);
	return true;
}
static unsigned int check_hlist(struct net *net,
				struct hlist_head *head,
				const struct nf_conntrack_tuple *tuple,
				bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = true;
	rcu_read_lock();

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
					      &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	rcu_read_unlock();

	return length;
}
static void tree_nodes_free(struct rb_root *root,
			    struct xt_connlimit_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct xt_connlimit_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const struct nf_conntrack_tuple *tuple,
	   const union nf_inet_addr *addr, const union nf_inet_addr *mask,
	   u8 family)
{
	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct xt_connlimit_rb *rbconn;
	struct xt_connlimit_conn *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = container_of(*rbnode, struct xt_connlimit_rb, node);

		parent = *rbnode;
		diff = same_source_net(addr, mask, &rbconn->addr, family);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;
			count = check_hlist(net, &rbconn->hhead, tuple, &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!add_hlist(&rbconn->hhead, tuple, addr))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		check_hlist(net, &rbconn->hhead, tuple, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_node_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */
		goto restart;
	}

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(connlimit_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	conn->addr = *addr;
	rbconn->addr = *addr;

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}
static int count_them(struct net *net,
		      struct xt_connlimit_data *data,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr,
		      const union nf_inet_addr *mask,
		      u_int8_t family)
{
	struct rb_root *root;
	int count;
	u32 hash;

	if (family == NFPROTO_IPV6) {
		hash = connlimit_iphash6(addr, mask);
		root = &data->climit_root6[hash];
	} else {
		hash = connlimit_iphash(addr->ip & mask->ip);
		root = &data->climit_root4[hash];
	}

	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	count = count_tree(net, root, tuple, addr, mask, family);

	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	return count;
}
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = dev_net(par->in ? par->in : par->out);
	const struct xt_connlimit_info *info = par->matchinfo;
	union nf_inet_addr addr;
	struct nf_conntrack_tuple tuple;
	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int connections;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct != NULL)
		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				    par->family, &tuple))
		goto hotdrop;

	if (par->family == NFPROTO_IPV6) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
			  iph->daddr : iph->saddr;
	}

	connections = count_them(net, info->data, tuple_ptr, &addr,
				 &info->mask, par->family);
	if (connections == 0)
		/* kmalloc failed, drop it entirely */
		goto hotdrop;

	return (connections > info->limit) ^
	       !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
	par->hotdrop = true;
	return false;
}
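/*
 * (Reconstruction note, not in the original source.)  The result is
 * (connections > limit) XOR the invert flag: normally the rule matches
 * once the count exceeds the limit; with XT_CONNLIMIT_INVERT set (the
 * iptables --connlimit-upto option) it matches while the count is still
 * at or below the limit.
 */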
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;
	int ret;

	if (unlikely(!connlimit_rnd)) {
		u_int32_t rand;

		do {
			get_random_bytes(&rand, sizeof(rand));
		} while (!rand);
		cmpxchg(&connlimit_rnd, 0, rand);
	}
	ret = nf_ct_l3proto_try_module_get(par->family);
	if (ret < 0) {
		pr_info("cannot load conntrack support for "
			"address family %u\n", par->family);
		return ret;
	}

	/* init private data */
	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
	if (info->data == NULL) {
		nf_ct_l3proto_module_put(par->family);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
		info->data->climit_root4[i] = RB_ROOT;
	for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
		info->data->climit_root6[i] = RB_ROOT;

	return 0;
}
static void destroy_tree(struct rb_root *r)
{
	struct xt_connlimit_conn *conn;
	struct xt_connlimit_rb *rbconn;
	struct hlist_node *n;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = container_of(node, struct xt_connlimit_rb, node);

		rb_erase(node, r);

		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
			kmem_cache_free(connlimit_conn_cachep, conn);

		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;

	nf_ct_l3proto_module_put(par->family);

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
		destroy_tree(&info->data->climit_root4[i]);
	for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
		destroy_tree(&info->data->climit_root6[i]);

	kfree(info->data);
}
static struct xt_match connlimit_mt_reg __read_mostly = {
	.name       = "connlimit",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connlimit_mt_check,
	.match      = connlimit_mt,
	.matchsize  = sizeof(struct xt_connlimit_info),
	.destroy    = connlimit_mt_destroy,
	.me         = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
	int ret, i;

	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
		spin_lock_init(&xt_connlimit_locks[i]);

	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
					   sizeof(struct xt_connlimit_conn),
					   0, 0, NULL);
	if (!connlimit_conn_cachep)
		return -ENOMEM;

	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
					   sizeof(struct xt_connlimit_rb),
					   0, 0, NULL);
	if (!connlimit_rb_cachep) {
		kmem_cache_destroy(connlimit_conn_cachep);
		return -ENOMEM;
	}
	ret = xt_register_match(&connlimit_mt_reg);
	if (ret != 0) {
		kmem_cache_destroy(connlimit_conn_cachep);
		kmem_cache_destroy(connlimit_rb_cachep);
	}
	return ret;
}
static void __exit connlimit_mt_exit(void)
{
	xt_unregister_match(&connlimit_mt_reg);
	kmem_cache_destroy(connlimit_conn_cachep);
	kmem_cache_destroy(connlimit_rb_cachep);
}

module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");
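/*
 * Illustrative userspace usage (reconstruction note, not part of the
 * original module source; assumes the stock iptables connlimit options):
 *
 *   # reject new HTTP connections once a source IP holds more than 16
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 16 -j REJECT
 *
 *   # count per /24 source network instead of per host
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 64 --connlimit-mask 24 -j REJECT
 */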