/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *	only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNLIMIT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS	8U
#else
#define CONNLIMIT_LOCK_SLOTS	256U
#endif

#define CONNLIMIT_GC_MAX_NODES	8
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
	union nf_inet_addr		addr;
};
struct xt_connlimit_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	union nf_inet_addr addr; /* search key */
};
static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
struct xt_connlimit_data {
	struct rb_root climit_root[CONNLIMIT_SLOTS];
};
static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;
static inline unsigned int connlimit_iphash(__be32 addr)
{
	return jhash_1word((__force __u32)addr,
			   connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr,
		  const union nf_inet_addr *mask)
{
	union nf_inet_addr res;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
		res.ip6[i] = addr->ip6[i] & mask->ip6[i];

	return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
		      connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}
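
/*
 * memcmp()-style comparison of the masked addresses: the sign of the result
 * orders the per-slot rb-tree, and 0 means both addresses fall in the same
 * source network under the configured prefix mask.
 */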
static int
same_source_net(const union nf_inet_addr *addr,
		const union nf_inet_addr *mask,
		const union nf_inet_addr *u3, u_int8_t family)
{
	if (family == NFPROTO_IPV4) {
		return ntohl(addr->ip & mask->ip) -
		       ntohl(u3->ip & mask->ip);
	} else {
		union nf_inet_addr lh, rh;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) {
			lh.ip6[i] = addr->ip6[i] & mask->ip6[i];
			rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
		}

		return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
	}
}
static bool add_hlist(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr)
{
	struct xt_connlimit_conn *conn;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	conn->addr = *addr;
	hlist_add_head(&conn->node, head);
	return true;
}
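
/*
 * Walk one node's tuple list, dropping entries whose conntrack has gone
 * away or already reached TIME_WAIT/CLOSE, and return the number still
 * alive.  *addit tells the caller whether the current tuple still needs
 * to be added, i.e. was not already present in the list.
 */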
static unsigned int check_hlist(struct net *net,
				struct hlist_head *head,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone,
				bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = true;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, zone, &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}
static void tree_nodes_free(struct rb_root *root,
			    struct xt_connlimit_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct xt_connlimit_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
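
/*
 * Find (or create) the tree node for this source network and count its
 * live connections.  The walk also garbage-collects opportunistically:
 * nodes whose tuple list turns out to be empty are collected (up to
 * CONNLIMIT_GC_MAX_NODES per pass) and erased before a new node is
 * inserted.  Returns 0 only on allocation failure, which the caller
 * maps to hotdrop.
 */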
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const struct nf_conntrack_tuple *tuple,
	   const union nf_inet_addr *addr, const union nf_inet_addr *mask,
	   u8 family, const struct nf_conntrack_zone *zone)
{
	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct xt_connlimit_rb *rbconn;
	struct xt_connlimit_conn *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);

		parent = *rbnode;
		diff = same_source_net(addr, mask, &rbconn->addr, family);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = check_hlist(net, &rbconn->hhead, tuple, zone,
					    &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!add_hlist(&rbconn->hhead, tuple, addr))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_node_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */
		goto restart;
	}

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(connlimit_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	conn->addr = *addr;
	rbconn->addr = *addr;

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}
static int count_them(struct net *net,
		      struct xt_connlimit_data *data,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr,
		      const union nf_inet_addr *mask,
		      u_int8_t family,
		      const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	if (family == NFPROTO_IPV6)
		hash = connlimit_iphash6(addr, mask);
	else
		hash = connlimit_iphash(addr->ip & mask->ip);
	root = &data->climit_root[hash];

	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	count = count_tree(net, root, tuple, addr, mask, family, zone);

	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	return count;
}
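
/*
 * The match callback: take the tuple from the existing conntrack entry if
 * there is one (otherwise parse it from the packet), pick the source or
 * destination address as the key, and compare the resulting connection
 * count against the configured limit, honouring XT_CONNLIMIT_INVERT.
 */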
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = xt_net(par);
	const struct xt_connlimit_info *info = par->matchinfo;
	union nf_inet_addr addr;
	struct nf_conntrack_tuple tuple;
	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int connections;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct != NULL) {
		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		zone = nf_ct_zone(ct);
	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				      xt_family(par), net, &tuple)) {
		goto hotdrop;
	}

	if (xt_family(par) == NFPROTO_IPV6) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
			  iph->daddr : iph->saddr;
	}

	connections = count_them(net, info->data, tuple_ptr, &addr,
				 &info->mask, xt_family(par), zone);
	if (connections == 0)
		/* kmalloc failed, drop it entirely */
		goto hotdrop;

	return (connections > info->limit) ^
	       !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
	par->hotdrop = true;
	return false;
}
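
/*
 * Illustrative userspace usage (an example, not part of this module): a
 * rule along the lines of
 *
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 16 --connlimit-mask 24 -j REJECT
 *
 * would reject new HTTP connections once a /24 source network already
 * holds more than 16 of them.
 */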
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;
	int ret;

	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));

	ret = nf_ct_netns_get(par->net, par->family);
	if (ret < 0) {
		pr_info("cannot load conntrack support for address family %u\n",
			par->family);
		return ret;
	}

	/* init private data */
	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
	if (info->data == NULL) {
		nf_ct_netns_put(par->net, par->family);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		info->data->climit_root[i] = RB_ROOT;

	return 0;
}
static void destroy_tree(struct rb_root *r)
{
	struct xt_connlimit_conn *conn;
	struct xt_connlimit_rb *rbconn;
	struct hlist_node *n;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct xt_connlimit_rb, node);

		rb_erase(node, r);

		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
			kmem_cache_free(connlimit_conn_cachep, conn);

		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;

	nf_ct_netns_put(par->net, par->family);

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		destroy_tree(&info->data->climit_root[i]);

	kfree(info->data);
}
static struct xt_match connlimit_mt_reg __read_mostly = {
	.name       = "connlimit",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connlimit_mt_check,
	.match      = connlimit_mt,
	.matchsize  = sizeof(struct xt_connlimit_info),
	.usersize   = offsetof(struct xt_connlimit_info, data),
	.destroy    = connlimit_mt_destroy,
	.me         = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
	int ret, i;

	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
		spin_lock_init(&xt_connlimit_locks[i]);

	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
					   sizeof(struct xt_connlimit_conn),
					   0, 0, NULL);
	if (!connlimit_conn_cachep)
		return -ENOMEM;

	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
					   sizeof(struct xt_connlimit_rb),
					   0, 0, NULL);
	if (!connlimit_rb_cachep) {
		kmem_cache_destroy(connlimit_conn_cachep);
		return -ENOMEM;
	}

	ret = xt_register_match(&connlimit_mt_reg);
	if (ret != 0) {
		kmem_cache_destroy(connlimit_conn_cachep);
		kmem_cache_destroy(connlimit_rb_cachep);
	}
	return ret;
}
static void __exit connlimit_mt_exit(void)
{
	xt_unregister_match(&connlimit_mt_reg);
	kmem_cache_destroy(connlimit_conn_cachep);
	kmem_cache_destroy(connlimit_rb_cachep);
}
module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");