/* iptables match extension to limit the number of packets per second
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *
 * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("iptables match for limiting per hash-bucket");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
/* need to declare this at the top */
static struct proc_dir_entry *hashlimit_procdir4;
static struct proc_dir_entry *hashlimit_procdir6;
static struct file_operations dl_file_ops;
struct dsthash_ent {
        /* static / read-only parts in the beginning */
        struct hlist_node node;
        struct dsthash_dst dst;

        /* modified structure members in the end */
        unsigned long expires;          /* precalculated expiry time */
        struct {
                unsigned long prev;     /* last modification */
                u_int32_t credit;
                u_int32_t credit_cap, cost;
        } rateinfo;
};
struct xt_hashlimit_htable {
        struct hlist_node node;         /* global list of all htables */
        atomic_t use;
        int family;

        struct hashlimit_cfg cfg;       /* config */

        /* used internally */
        spinlock_t lock;                /* lock for list_head */
        u_int32_t rnd;                  /* random seed for hash */
        int rnd_initialized;
        unsigned int count;             /* number entries in table */
        struct timer_list timer;        /* timer for gc */

        /* seq_file stuff */
        struct proc_dir_entry *pde;

        struct hlist_head hash[0];      /* hashtable itself */
};
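
/*
 * The zero-length 'hash' member makes the struct a variable-size header:
 * htable_create() below allocates it together with cfg.size bucket heads
 * in a single vmalloc() and initializes each bucket as an empty hlist.
 */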
static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
static DEFINE_MUTEX(hlimit_mutex);      /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static kmem_cache_t *hashlimit_cachep __read_mostly;
static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
{
        return !memcmp(&ent->dst, b, sizeof(ent->dst));
}
static inline u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
        return jhash(dst, sizeof(*dst), ht->rnd) % ht->cfg.size;
}
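
/*
 * Bucket selection: the dsthash_dst key (addresses/ports selected by the
 * configured hash mode) is run through jhash with the per-table random
 * seed ht->rnd and reduced modulo the table size.  The seed itself is
 * initialized lazily, when the first entry is inserted (see
 * dsthash_alloc_init() below).
 */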
static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
{
        struct dsthash_ent *ent;
        struct hlist_node *pos;
        u_int32_t hash = hash_dst(ht, dst);

        if (!hlist_empty(&ht->hash[hash])) {
                hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
                        if (dst_cmp(ent, dst))
                                return ent;
        }
        return NULL;
}
/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
{
        struct dsthash_ent *ent;

        /* initialize hash with random val at the time we allocate
         * the first hashtable entry */
        if (!ht->rnd_initialized) {
                get_random_bytes(&ht->rnd, 4);
                ht->rnd_initialized = 1;
        }

        if (ht->cfg.max && ht->count >= ht->cfg.max) {
                /* FIXME: do something. question is what.. */
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "xt_hashlimit: max count of %u reached\n",
                               ht->cfg.max);
                return NULL;
        }

        ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
        if (!ent) {
                if (net_ratelimit())
                        printk(KERN_ERR
                               "xt_hashlimit: can't allocate dsthash_ent\n");
                return NULL;
        }
        memcpy(&ent->dst, dst, sizeof(ent->dst));

        hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
        ht->count++;
        return ent;
}
static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
        hlist_del(&ent->node);
        kmem_cache_free(hashlimit_cachep, ent);
        ht->count--;
}
static void htable_gc(unsigned long htlong);
static int htable_create(struct xt_hashlimit_info *minfo, int family)
{
        struct xt_hashlimit_htable *hinfo;
        unsigned int size;
        unsigned int i;

        if (minfo->cfg.size)
                size = minfo->cfg.size;
        else {
                size = ((num_physpages << PAGE_SHIFT) / 16384) /
                       sizeof(struct list_head);
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        size = 8192;
                if (size < 16)
                        size = 16;
        }
        /* FIXME: don't use vmalloc() here or anywhere else -HW */
        hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
                        sizeof(struct list_head) * size);
        if (!hinfo) {
                printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
                return -1;
        }
        minfo->hinfo = hinfo;

        /* copy match config into hashtable config */
        memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
        hinfo->cfg.size = size;
        if (!hinfo->cfg.max)
                hinfo->cfg.max = 8 * hinfo->cfg.size;
        else if (hinfo->cfg.max < hinfo->cfg.size)
                hinfo->cfg.max = hinfo->cfg.size;

        for (i = 0; i < hinfo->cfg.size; i++)
                INIT_HLIST_HEAD(&hinfo->hash[i]);

        atomic_set(&hinfo->use, 1);
        hinfo->count = 0;
        hinfo->family = family;
        hinfo->rnd_initialized = 0;
        spin_lock_init(&hinfo->lock);
        hinfo->pde = create_proc_entry(minfo->name, 0,
                                       family == AF_INET ? hashlimit_procdir4 :
                                                           hashlimit_procdir6);
        if (!hinfo->pde) {
                vfree(hinfo);
                return -1;
        }
        hinfo->pde->proc_fops = &dl_file_ops;
        hinfo->pde->data = hinfo;

        init_timer(&hinfo->timer);
        hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
        hinfo->timer.data = (unsigned long)hinfo;
        hinfo->timer.function = htable_gc;
        add_timer(&hinfo->timer);

        spin_lock_bh(&hashlimit_lock);
        hlist_add_head(&hinfo->node, &hashlimit_htables);
        spin_unlock_bh(&hashlimit_lock);

        return 0;
}
static int select_all(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
{
        return 1;
}

static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
{
        return (jiffies >= he->expires);
}
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
                                     int (*select)(struct xt_hashlimit_htable *ht,
                                                   struct dsthash_ent *he))
{
        unsigned int i;

        /* lock hash table and iterate over it */
        spin_lock_bh(&ht->lock);
        for (i = 0; i < ht->cfg.size; i++) {
                struct dsthash_ent *dh;
                struct hlist_node *pos, *n;
                hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
                        if ((*select)(ht, dh))
                                dsthash_free(ht, dh);
                }
        }
        spin_unlock_bh(&ht->lock);
}
/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
        struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

        htable_selective_cleanup(ht, select_gc);

        /* re-add the timer accordingly */
        ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
        add_timer(&ht->timer);
}
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
        /* remove timer, if it is pending */
        if (timer_pending(&hinfo->timer))
                del_timer(&hinfo->timer);

        /* remove proc entry */
        remove_proc_entry(hinfo->pde->name,
                          hinfo->family == AF_INET ? hashlimit_procdir4 :
                                                     hashlimit_procdir6);
        htable_selective_cleanup(hinfo, select_all);
        vfree(hinfo);
}
static struct xt_hashlimit_htable *htable_find_get(char *name, int family)
{
        struct xt_hashlimit_htable *hinfo;
        struct hlist_node *pos;

        spin_lock_bh(&hashlimit_lock);
        hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
                if (!strcmp(name, hinfo->pde->name) &&
                    hinfo->family == family) {
                        atomic_inc(&hinfo->use);
                        spin_unlock_bh(&hashlimit_lock);
                        return hinfo;
                }
        }
        spin_unlock_bh(&hashlimit_lock);
        return NULL;
}
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
        if (atomic_dec_and_test(&hinfo->use)) {
                spin_lock_bh(&hashlimit_lock);
                hlist_del(&hinfo->node);
                spin_unlock_bh(&hashlimit_lock);
                htable_destroy(hinfo);
        }
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If you get credit balance more than this, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
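
/*
 * Worked example (illustrative only; the result depends on HZ): with
 * HZ == 1000, MAX_CPJ = 0xFFFFFFFF / (1000*60*60*24) = 49 and
 * POW2_BELOW32(49) = 32, so each entry earns 32 credits per jiffy,
 * i.e. 32000 credits per second, while CREDITS_PER_JIFFY*HZ*60*60*24
 * still fits in u_int32_t.
 */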
/* Precision saver. */
static inline u_int32_t
user2credits(u_int32_t user)
{
        /* If multiplying would overflow... */
        if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
                /* Divide first. */
                return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

        return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
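
/*
 * Illustrative numbers, assuming HZ == 1000 and hence CREDITS_PER_JIFFY == 32
 * as in the example above: with XT_HASHLIMIT_SCALE == 10000, a user-supplied
 * 'avg' of 10000 gives user2credits(10000) = 10000*1000*32/10000 = 32000
 * credits, exactly the amount refilled per second, i.e. on average one
 * matching packet per second is let through.
 */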
static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
{
        dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
        if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
                dh->rateinfo.credit = dh->rateinfo.credit_cap;
        dh->rateinfo.prev = now;
}
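
/*
 * rateinfo_recalc() is the token-bucket refill step: credits accumulate at
 * CREDITS_PER_JIFFY for every jiffy since the last update and are clamped
 * at credit_cap (the burst allowance); hashlimit_match() below then spends
 * 'cost' credits for every packet that matches.
 */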
static int
hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
                   const struct sk_buff *skb, unsigned int protoff)
{
        __be16 _ports[2], *ports;
        int nexthdr;

        memset(dst, 0, sizeof(*dst));

        switch (hinfo->family) {
        case AF_INET:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
                        dst->addr.ip.dst = skb->nh.iph->daddr;
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
                        dst->addr.ip.src = skb->nh.iph->saddr;

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = skb->nh.iph->protocol;
                break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
                        memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr,
                               sizeof(dst->addr.ip6.dst));
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
                        memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr,
                               sizeof(dst->addr.ip6.src));

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ipv6_find_hdr(skb, &protoff, -1, NULL);
                if (nexthdr < 0)
                        return -1;
                break;
#endif
        default:
                BUG();
                return 0;
        }

        switch (nexthdr) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_SCTP:
        case IPPROTO_DCCP:
                ports = skb_header_pointer(skb, protoff, sizeof(_ports),
                                           &_ports);
                break;
        default:
                _ports[0] = _ports[1] = 0;
                ports = _ports;
                break;
        }
        if (!ports)
                return -1;
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
                dst->src_port = ports[0];
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
                dst->dst_port = ports[1];
        return 0;
}
static int
hashlimit_match(const struct sk_buff *skb,
                const struct net_device *in,
                const struct net_device *out,
                const struct xt_match *match,
                const void *matchinfo,
                int offset,
                unsigned int protoff,
                int *hotdrop)
{
        struct xt_hashlimit_info *r =
                ((struct xt_hashlimit_info *)matchinfo)->u.master;
        struct xt_hashlimit_htable *hinfo = r->hinfo;
        unsigned long now = jiffies;
        struct dsthash_ent *dh;
        struct dsthash_dst dst;

        if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
                goto hotdrop;

        spin_lock_bh(&hinfo->lock);
        dh = dsthash_find(hinfo, &dst);
        if (!dh) {
                dh = dsthash_alloc_init(hinfo, &dst);
                if (!dh) {
                        spin_unlock_bh(&hinfo->lock);
                        goto hotdrop;
                }

                dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
                dh->rateinfo.prev = jiffies;
                dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
                                                   hinfo->cfg.burst);
                dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
                                                       hinfo->cfg.burst);
                dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
        } else {
                /* update expiration timeout */
                dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
                rateinfo_recalc(dh, now);
        }

        if (dh->rateinfo.credit >= dh->rateinfo.cost) {
                /* We're underlimit. */
                dh->rateinfo.credit -= dh->rateinfo.cost;
                spin_unlock_bh(&hinfo->lock);
                return 1;
        }

        spin_unlock_bh(&hinfo->lock);

        /* default case: we're overlimit, thus don't match */
        return 0;

hotdrop:
        *hotdrop = 1;
        return 0;
}
static int
hashlimit_checkentry(const char *tablename,
                     const void *inf,
                     const struct xt_match *match,
                     void *matchinfo,
                     unsigned int hook_mask)
{
        struct xt_hashlimit_info *r = matchinfo;

        /* Check for overflow. */
        if (r->cfg.burst == 0 ||
            user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
                printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
                       r->cfg.avg, r->cfg.burst);
                return 0;
        }
        if (r->cfg.mode == 0 ||
            r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
                           XT_HASHLIMIT_HASH_DIP |
                           XT_HASHLIMIT_HASH_SIP |
                           XT_HASHLIMIT_HASH_SPT))
                return 0;
        if (!r->cfg.gc_interval)
                return 0;
        if (!r->cfg.expire)
                return 0;
        if (r->name[sizeof(r->name) - 1] != '\0')
                return 0;

        /* This is the best we've got: We cannot release and re-grab lock,
         * since checkentry() is called before x_tables.c grabs xt_mutex.
         * We also cannot grab the hashtable spinlock, since htable_create will
         * call vmalloc, and that can sleep.  And we cannot just re-search
         * the list of htable's in htable_create(), since then we would
         * create duplicate proc files. -HW */
        mutex_lock(&hlimit_mutex);
        r->hinfo = htable_find_get(r->name, match->family);
        if (!r->hinfo && htable_create(r, match->family) != 0) {
                mutex_unlock(&hlimit_mutex);
                return 0;
        }
        mutex_unlock(&hlimit_mutex);

        /* Ugly hack: For SMP, we only want to use one set */
        r->u.master = r;
        return 1;
}
static void
hashlimit_destroy(const struct xt_match *match, void *matchinfo)
{
        struct xt_hashlimit_info *r = matchinfo;

        htable_put(r->hinfo);
}
struct compat_xt_hashlimit_info {
        char name[IFNAMSIZ];
        struct hashlimit_cfg cfg;
        compat_uptr_t hinfo;
        compat_uptr_t master;
};
static void compat_from_user(void *dst, void *src)
{
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

        memcpy(dst, src, off);
        memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
}
static int compat_to_user(void __user *dst, void *src)
{
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

        return copy_to_user(dst, src, off) ? -EFAULT : 0;
}
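
/*
 * Only the leading name/config part of the match info (everything up to the
 * 'hinfo' pointer) is meaningful across the 32/64-bit boundary: the kernel
 * pointers at the tail are cleared on the way in and omitted on the way out,
 * and are re-established by hashlimit_checkentry().
 */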
static struct xt_match xt_hashlimit[] = {
        {
                .name           = "hashlimit",
                .family         = AF_INET,
                .match          = hashlimit_match,
                .matchsize      = sizeof(struct xt_hashlimit_info),
                .compatsize     = sizeof(struct compat_xt_hashlimit_info),
                .compat_from_user = compat_from_user,
                .compat_to_user = compat_to_user,
                .checkentry     = hashlimit_checkentry,
                .destroy        = hashlimit_destroy,
                .me             = THIS_MODULE
        },
        {
                .name           = "hashlimit",
                .family         = AF_INET6,
                .match          = hashlimit_match,
                .matchsize      = sizeof(struct xt_hashlimit_info),
                .compatsize     = sizeof(struct compat_xt_hashlimit_info),
                .compat_from_user = compat_from_user,
                .compat_to_user = compat_to_user,
                .checkentry     = hashlimit_checkentry,
                .destroy        = hashlimit_destroy,
                .me             = THIS_MODULE
        },
};
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket;

        spin_lock_bh(&htable->lock);
        if (*pos >= htable->cfg.size)
                return NULL;

        bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
        if (!bucket)
                return ERR_PTR(-ENOMEM);

        *bucket = *pos;
        return bucket;
}
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;

        *pos = ++(*bucket);
        if (*pos >= htable->cfg.size) {
                kfree(v);
                return NULL;
        }
        return bucket;
}
static void dl_seq_stop(struct seq_file *s, void *v)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;

        kfree(bucket);
        spin_unlock_bh(&htable->lock);
}
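
/*
 * Each line of the per-table /proc entry has the form (IPv4 shown, IPv6 is
 * analogous):
 *   <seconds-until-expiry> <src>:<sport>-><dst>:<dport> <credit> <credit_cap> <cost>
 * matching the seq_printf() format strings below.
 */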
static int dl_seq_real_show(struct dsthash_ent *ent, int family,
                            struct seq_file *s)
{
        /* recalculate to show accurate numbers */
        rateinfo_recalc(ent, jiffies);

        switch (family) {
        case AF_INET:
                return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
                                     "%u.%u.%u.%u:%u %u %u %u\n",
                                  (long)(ent->expires - jiffies)/HZ,
                                  NIPQUAD(ent->dst.addr.ip.src),
                                  ntohs(ent->dst.src_port),
                                  NIPQUAD(ent->dst.addr.ip.dst),
                                  ntohs(ent->dst.dst_port),
                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                  ent->rateinfo.cost);
        case AF_INET6:
                return seq_printf(s, "%ld " NIP6_FMT ":%u->"
                                     NIP6_FMT ":%u %u %u %u\n",
                                  (long)(ent->expires - jiffies)/HZ,
                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src),
                                  ntohs(ent->dst.src_port),
                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst),
                                  ntohs(ent->dst.dst_port),
                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                  ent->rateinfo.cost);
        default:
                BUG();
                return 0;
        }
}
static int dl_seq_show(struct seq_file *s, void *v)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
        struct hlist_node *pos;

        if (!hlist_empty(&htable->hash[*bucket])) {
                hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
                        if (dl_seq_real_show(ent, htable->family, s))
                                return 1;
        }
        return 0;
}
static struct seq_operations dl_seq_ops = {
        .start = dl_seq_start,
        .next  = dl_seq_next,
        .stop  = dl_seq_stop,
        .show  = dl_seq_show
};
static int dl_proc_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &dl_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                sf->private = PDE(inode);
        }
        return ret;
}
static struct file_operations dl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = dl_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};
static int __init xt_hashlimit_init(void)
{
        int err;

        err = xt_register_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
        if (err < 0)
                goto err1;

        err = -ENOMEM;
        hashlimit_cachep = kmem_cache_create("xt_hashlimit",
                                             sizeof(struct dsthash_ent), 0, 0,
                                             NULL, NULL);
        if (!hashlimit_cachep) {
                printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
                goto err2;
        }
        hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", proc_net);
        if (!hashlimit_procdir4) {
                printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
                                "entry\n");
                goto err3;
        }
        hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net);
        if (!hashlimit_procdir6) {
                printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
                                "entry\n");
                goto err4;
        }
        return 0;
err4:
        remove_proc_entry("ipt_hashlimit", proc_net);
err3:
        kmem_cache_destroy(hashlimit_cachep);
err2:
        xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
err1:
        return err;
}
static void __exit xt_hashlimit_fini(void)
{
        remove_proc_entry("ipt_hashlimit", proc_net);
        remove_proc_entry("ip6t_hashlimit", proc_net);
        kmem_cache_destroy(hashlimit_cachep);
        xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
}
module_init(xt_hashlimit_init);
module_exit(xt_hashlimit_fini);