/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *	       only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNLIMIT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS	8U
#else
#define CONNLIMIT_LOCK_SLOTS	256U
#endif

#define CONNLIMIT_GC_MAX_NODES	8
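
/*
 * Data layout: one array of CONNLIMIT_SLOTS rb-tree roots per address family,
 * indexed by a jhash of the masked address.  Each tree node (xt_connlimit_rb)
 * stands for one masked source/destination network and carries an hlist of
 * the conntrack tuples counted against it.  CONNLIMIT_LOCK_SLOTS spinlocks
 * protect the trees; each lock covers CONNLIMIT_SLOTS / CONNLIMIT_LOCK_SLOTS
 * consecutive hash buckets (see count_them()).
 */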

/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
	union nf_inet_addr		addr;
};

struct xt_connlimit_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	union nf_inet_addr addr; /* search key */
};

static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;

struct xt_connlimit_data {
	struct rb_root climit_root4[CONNLIMIT_SLOTS];
	struct rb_root climit_root6[CONNLIMIT_SLOTS];
};

static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;

static inline unsigned int connlimit_iphash(__be32 addr)
{
	return jhash_1word((__force __u32)addr,
			   connlimit_rnd) % CONNLIMIT_SLOTS;
}

static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
		  const union nf_inet_addr *mask)
{
	union nf_inet_addr res;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
		res.ip6[i] = addr->ip6[i] & mask->ip6[i];

	return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
		      connlimit_rnd) % CONNLIMIT_SLOTS;
}

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}
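
/*
 * Compare the masked forms of @addr and @u3: returns zero when both fall in
 * the same network under @mask, and an ordering value otherwise, which
 * count_tree() uses to steer its rb-tree descent.
 */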
static int
same_source_net(const union nf_inet_addr *addr,
		const union nf_inet_addr *mask,
		const union nf_inet_addr *u3, u_int8_t family)
{
	if (family == NFPROTO_IPV4) {
		return ntohl(addr->ip & mask->ip) -
		       ntohl(u3->ip & mask->ip);
	} else {
		union nf_inet_addr lh, rh;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
			lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
			rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
		}

		return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
	}
}

static bool add_hlist(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr)
{
	struct xt_connlimit_conn *conn;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	conn->addr = *addr;
	hlist_add_head(&conn->node, head);
	return true;
}
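
/*
 * Walk the tuples saved for one network node: entries whose conntrack entry
 * has disappeared or is already closed (TIME_WAIT/CLOSE) are pruned, the
 * remaining ones are counted.  *addit tells the caller whether the current
 * tuple still needs to be added to the list.
 */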
static unsigned int check_hlist(struct net *net,
				struct hlist_head *head,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone,
				bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = true;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, zone, &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}

static void tree_nodes_free(struct rb_root *root,
			    struct xt_connlimit_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct xt_connlimit_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
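
/*
 * Core lookup: descend the rb-tree for the masked address.  On a match the
 * existing node's hlist is counted (and the tuple added if necessary); on a
 * miss, up to CONNLIMIT_GC_MAX_NODES empty nodes seen during the descent are
 * garbage-collected before a fresh node is allocated and inserted.  A return
 * value of 0 signals allocation failure and makes the caller hotdrop.
 */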
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const struct nf_conntrack_tuple *tuple,
	   const union nf_inet_addr *addr, const union nf_inet_addr *mask,
	   u8 family, const struct nf_conntrack_zone *zone)
{
	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct xt_connlimit_rb *rbconn;
	struct xt_connlimit_conn *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = container_of(*rbnode, struct xt_connlimit_rb, node);

		parent = *rbnode;
		diff = same_source_net(addr, mask, &rbconn->addr, family);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!add_hlist(&rbconn->hhead, tuple, addr))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_node_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */
		goto restart;
	}

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(connlimit_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	conn->addr = *addr;
	rbconn->addr = *addr;

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}
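
/*
 * Pick the per-family tree for this (masked) address and count under the
 * spinlock that guards its hash bucket.
 */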
static int count_them(struct net *net,
		      struct xt_connlimit_data *data,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr,
		      const union nf_inet_addr *mask,
		      u_int8_t family,
		      const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	if (family == NFPROTO_IPV6) {
		hash = connlimit_iphash6(addr, mask);
		root = &data->climit_root6[hash];
	} else {
		hash = connlimit_iphash(addr->ip & mask->ip);
		root = &data->climit_root4[hash];
	}

	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	count = count_tree(net, root, tuple, addr, mask, family, zone);

	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	return count;
}
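
/*
 * Match callback: counts the connections originating from (or, with
 * XT_CONNLIMIT_DADDR, destined to) the same masked network as this packet
 * and compares the count against the configured limit; XT_CONNLIMIT_INVERT
 * flips the result.  A typical rule pairing this match with SYN filtering,
 * as the comment in check_hlist() recommends (port and limit values here are
 * only an illustration):
 *
 *   iptables -A INPUT -p tcp --syn --dport 22 \
 *            -m connlimit --connlimit-above 3 -j REJECT
 */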
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = par->net;
	const struct xt_connlimit_info *info = par->matchinfo;
	union nf_inet_addr addr;
	struct nf_conntrack_tuple tuple;
	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int connections;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct != NULL) {
		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		zone = nf_ct_zone(ct);
	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				      par->family, net, &tuple)) {
		goto hotdrop;
	}

	if (par->family == NFPROTO_IPV6) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
			  iph->daddr : iph->saddr;
	}

	connections = count_them(net, info->data, tuple_ptr, &addr,
				 &info->mask, par->family, zone);
	if (connections == 0)
		/* kmalloc failed, drop it entirely */
		goto hotdrop;

	return (connections > info->limit) ^
	       !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
	par->hotdrop = true;
	return false;
}
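
/*
 * checkentry: take a reference on conntrack support for the rule's address
 * family and allocate the per-rule xt_connlimit_data with empty tree roots.
 */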
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;
	int ret;

	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));

	ret = nf_ct_l3proto_try_module_get(par->family);
	if (ret < 0) {
		pr_info("cannot load conntrack support for address family %u\n",
			par->family);
		return ret;
	}

	/* init private data */
	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
	if (info->data == NULL) {
		nf_ct_l3proto_module_put(par->family);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
		info->data->climit_root4[i] = RB_ROOT;
	for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
		info->data->climit_root6[i] = RB_ROOT;

	return 0;
}

static void destroy_tree(struct rb_root *r)
{
	struct xt_connlimit_conn *conn;
	struct xt_connlimit_rb *rbconn;
	struct hlist_node *n;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = container_of(node, struct xt_connlimit_rb, node);

		rb_erase(node, r);

		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
			kmem_cache_free(connlimit_conn_cachep, conn);

		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}

static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;

	nf_ct_l3proto_module_put(par->family);

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
		destroy_tree(&info->data->climit_root4[i]);
	for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
		destroy_tree(&info->data->climit_root6[i]);

	kfree(info->data);
}

static struct xt_match connlimit_mt_reg __read_mostly = {
	.name       = "connlimit",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connlimit_mt_check,
	.match      = connlimit_mt,
	.matchsize  = sizeof(struct xt_connlimit_info),
	.destroy    = connlimit_mt_destroy,
	.me         = THIS_MODULE,
};

static int __init connlimit_mt_init(void)
{
	int ret, i;

	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
		spin_lock_init(&xt_connlimit_locks[i]);

	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
					   sizeof(struct xt_connlimit_conn),
					   0, 0, NULL);
	if (!connlimit_conn_cachep)
		return -ENOMEM;

	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
					   sizeof(struct xt_connlimit_rb),
					   0, 0, NULL);
	if (!connlimit_rb_cachep) {
		kmem_cache_destroy(connlimit_conn_cachep);
		return -ENOMEM;
	}

	ret = xt_register_match(&connlimit_mt_reg);
	if (ret != 0) {
		kmem_cache_destroy(connlimit_conn_cachep);
		kmem_cache_destroy(connlimit_rb_cachep);
	}
	return ret;
}

static void __exit connlimit_mt_exit(void)
{
	xt_unregister_match(&connlimit_mt_reg);
	kmem_cache_destroy(connlimit_conn_cachep);
	kmem_cache_destroy(connlimit_rb_cachep);
}

module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");