/* iptables match extension to limit the number of packets per second
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *
 * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
/* need to declare this at the top */
static struct proc_dir_entry *hashlimit_procdir4;
static struct proc_dir_entry *hashlimit_procdir6;
static const struct file_operations dl_file_ops;
struct dsthash_dst {
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ip;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
                struct {
                        __be32 src[4];
                        __be32 dst[4];
                } ip6;
#endif
        } addr;
        __be16 src_port;
        __be16 dst_port;
};

struct dsthash_ent {
        /* static / read-only parts in the beginning */
        struct hlist_node node;
        struct dsthash_dst dst;

        /* modified structure members in the end */
        unsigned long expires;          /* precalculated expiry time */
        struct {
                unsigned long prev;     /* last modification */
                u_int32_t credit;
                u_int32_t credit_cap, cost;
        } rateinfo;
};
struct xt_hashlimit_htable {
        struct hlist_node node;         /* global list of all htables */
        atomic_t use;
        int family;

        struct hashlimit_cfg cfg;       /* config */

        /* used internally */
        spinlock_t lock;                /* lock for list_head */
        u_int32_t rnd;                  /* random seed for hash */
        int rnd_initialized;
        unsigned int count;             /* number entries in table */
        struct timer_list timer;        /* timer for gc */

        /* seq_file stuff */
        struct proc_dir_entry *pde;

        struct hlist_head hash[0];      /* hashtable itself */
};
static DEFINE_SPINLOCK(hashlimit_lock);        /* protects htables list */
static DEFINE_MUTEX(hlimit_mutex);             /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static struct kmem_cache *hashlimit_cachep __read_mostly;
static inline bool dst_cmp(const struct dsthash_ent *ent,
                           const struct dsthash_dst *b)
{
        return !memcmp(&ent->dst, b, sizeof(ent->dst));
}
static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
        u_int32_t hash = jhash2((const u32 *)dst,
                                sizeof(*dst)/sizeof(u32),
                                ht->rnd);
        /*
         * Instead of returning hash % ht->cfg.size (implying a divide)
         * we return the high 32 bits of the (hash * ht->cfg.size) that will
         * give results between [0 and cfg.size-1] and same hash distribution,
         * but using a multiply, less expensive than a divide
         */
        return ((u64)hash * ht->cfg.size) >> 32;
}
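/*
 * Worked example of the multiply-shift above (illustrative numbers):
 * with cfg.size = 4096 and a jhash2() result of 0x9e3779b9,
 *
 *      ((u64)0x9e3779b9 * 4096) >> 32 == 0x9e3 == 2531,
 *
 * i.e. the hash is scaled into [0, 4095] without a division.  A plain
 * "hash % 4096" would instead keep only the low 12 bits of the hash.
 */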
static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
             const struct dsthash_dst *dst)
{
        struct dsthash_ent *ent;
        struct hlist_node *pos;
        u_int32_t hash = hash_dst(ht, dst);

        if (!hlist_empty(&ht->hash[hash])) {
                hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
                        if (dst_cmp(ent, dst))
                                return ent;
        }
        return NULL;
}
/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
                   const struct dsthash_dst *dst)
{
        struct dsthash_ent *ent;

        /* initialize hash with random val at the time we allocate
         * the first hashtable entry */
        if (!ht->rnd_initialized) {
                get_random_bytes(&ht->rnd, 4);
                ht->rnd_initialized = 1;
        }

        if (ht->cfg.max && ht->count >= ht->cfg.max) {
                /* FIXME: do something. question is what.. */
                if (net_ratelimit())
                        printk(KERN_WARNING
                                "xt_hashlimit: max count of %u reached\n",
                                ht->cfg.max);
                return NULL;
        }

        ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
        if (!ent) {
                if (net_ratelimit())
                        printk(KERN_ERR
                                "xt_hashlimit: can't allocate dsthash_ent\n");
                return NULL;
        }
        memcpy(&ent->dst, dst, sizeof(ent->dst));

        hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
        ht->count++;
        return ent;
}
static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
        hlist_del(&ent->node);
        kmem_cache_free(hashlimit_cachep, ent);
        ht->count--;
}
static void htable_gc(unsigned long htlong);
static int htable_create(struct xt_hashlimit_info *minfo, int family)
{
        struct xt_hashlimit_htable *hinfo;
        unsigned int size;
        unsigned int i;

        if (minfo->cfg.size)
                size = minfo->cfg.size;
        else {
                size = ((num_physpages << PAGE_SHIFT) / 16384) /
                       sizeof(struct list_head);
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        size = 8192;
                if (size < 16)
                        size = 16;
        }
        /* FIXME: don't use vmalloc() here or anywhere else -HW */
        hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
                        sizeof(struct list_head) * size);
        if (!hinfo) {
                printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
                return -1;
        }
        minfo->hinfo = hinfo;

        /* copy match config into hashtable config */
        memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
        hinfo->cfg.size = size;
        if (!hinfo->cfg.max)
                hinfo->cfg.max = 8 * hinfo->cfg.size;
        else if (hinfo->cfg.max < hinfo->cfg.size)
                hinfo->cfg.max = hinfo->cfg.size;

        for (i = 0; i < hinfo->cfg.size; i++)
                INIT_HLIST_HEAD(&hinfo->hash[i]);
        atomic_set(&hinfo->use, 1);
        hinfo->count = 0;
        hinfo->family = family;
        hinfo->rnd_initialized = 0;
        spin_lock_init(&hinfo->lock);
        hinfo->pde = create_proc_entry(minfo->name, 0,
                                       family == AF_INET ? hashlimit_procdir4 :
                                                           hashlimit_procdir6);
        if (!hinfo->pde) {
                vfree(hinfo);
                return -1;
        }
        hinfo->pde->proc_fops = &dl_file_ops;
        hinfo->pde->data = hinfo;

        setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
        hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
        add_timer(&hinfo->timer);

        spin_lock_bh(&hashlimit_lock);
        hlist_add_head(&hinfo->node, &hashlimit_htables);
        spin_unlock_bh(&hashlimit_lock);

        return 0;
}
static bool select_all(const struct xt_hashlimit_htable *ht,
                       const struct dsthash_ent *he)
{
        return true;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
                      const struct dsthash_ent *he)
{
        return time_after_eq(jiffies, he->expires);
}
static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
                                     bool (*select)(const struct xt_hashlimit_htable *ht,
                                                    const struct dsthash_ent *he))
{
        unsigned int i;

        /* lock hash table and iterate over it */
        spin_lock_bh(&ht->lock);
        for (i = 0; i < ht->cfg.size; i++) {
                struct dsthash_ent *dh;
                struct hlist_node *pos, *n;
                hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
                        if ((*select)(ht, dh))
                                dsthash_free(ht, dh);
                }
        }
        spin_unlock_bh(&ht->lock);
}
/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
        struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

        htable_selective_cleanup(ht, select_gc);

        /* re-add the timer accordingly */
        ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
        add_timer(&ht->timer);
}
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
        /* remove timer, if it is pending */
        if (timer_pending(&hinfo->timer))
                del_timer(&hinfo->timer);

        /* remove proc entry */
        remove_proc_entry(hinfo->pde->name,
                          hinfo->family == AF_INET ? hashlimit_procdir4 :
                                                     hashlimit_procdir6);
        htable_selective_cleanup(hinfo, select_all);
        vfree(hinfo);
}
static struct xt_hashlimit_htable *htable_find_get(const char *name,
                                                   int family)
{
        struct xt_hashlimit_htable *hinfo;
        struct hlist_node *pos;

        spin_lock_bh(&hashlimit_lock);
        hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
                if (!strcmp(name, hinfo->pde->name) &&
                    hinfo->family == family) {
                        atomic_inc(&hinfo->use);
                        spin_unlock_bh(&hashlimit_lock);
                        return hinfo;
                }
        }
        spin_unlock_bh(&hashlimit_lock);
        return NULL;
}
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
        if (atomic_dec_and_test(&hinfo->use)) {
                spin_lock_bh(&hashlimit_lock);
                hlist_del(&hinfo->node);
                spin_unlock_bh(&hashlimit_lock);
                htable_destroy(hinfo);
        }
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If your credit balance would exceed `credit_cap', the extra credit
   is discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (the slowest the userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
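/*
 * Illustrative numbers: with HZ=1000 there are 1000*60*60*24 =
 * 86,400,000 jiffies per day, so MAX_CPJ = 0xFFFFFFFF / 86,400,000 = 49;
 * with HZ=100 it is 497.  Handing out more credits per jiffy than that
 * would let the u32 credit counter overflow for a rate of 1 per day.
 */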
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
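/*
 * Illustrative expansion: for MAX_CPJ = 49 (HZ=1000), _POW2_BELOW32()
 * smears the top set bit downwards, 0b110001 -> 0b111111 = 63, and
 * POW2_BELOW32(49) = (63 >> 1) + 1 = 32.  CREDITS_PER_JIFFY is thus the
 * largest power of two not exceeding MAX_CPJ, so the multiplies and
 * divides by it below compile down to shifts.
 */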
/* Precision saver. */
static inline u_int32_t
user2credits(u_int32_t user)
{
        /* If multiplying would overflow... */
        if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
                /* Divide first. */
                return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

        return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
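/*
 * Illustrative conversion, assuming HZ=1000 (so CREDITS_PER_JIFFY=32)
 * and XT_HASHLIMIT_SCALE=10000: a rule of "5/second" arrives from
 * userspace as avg = 10000/5 = 2000, and
 *
 *      user2credits(2000) = 2000 * 1000 * 32 / 10000 = 6400 credits.
 *
 * Credit refills at 32 per jiffy, so 6400 credits are earned back in
 * 200 jiffies = 200 ms, i.e. on average one matching packet per 1/5 s.
 */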
static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
{
        dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
        if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
                dh->rateinfo.credit = dh->rateinfo.credit_cap;
        dh->rateinfo.prev = now;
}
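/*
 * Illustrative bucket behaviour (same assumptions as above; rate
 * 5/second, burst 1): credit and credit_cap start at 6400 and cost is
 * 6400.  A matching packet drains the bucket to 0; 100 jiffies later
 * rateinfo_recalc() has refilled only 100 * 32 = 3200, still below
 * cost, so the next packet is over the limit.  After 200 idle jiffies
 * the credit is back to 6400 and then clamped at credit_cap, so idle
 * time never buys more than one burst worth of packets.
 */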
static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                   struct dsthash_dst *dst,
                   const struct sk_buff *skb, unsigned int protoff)
{
        __be16 _ports[2], *ports;
        u8 nexthdr;

        memset(dst, 0, sizeof(*dst));

        switch (hinfo->family) {
        case AF_INET:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
                        dst->addr.ip.dst = ip_hdr(skb)->daddr;
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
                        dst->addr.ip.src = ip_hdr(skb)->saddr;

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ip_hdr(skb)->protocol;
                break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        case AF_INET6:
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
                        memcpy(&dst->addr.ip6.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(dst->addr.ip6.dst));
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
                        memcpy(&dst->addr.ip6.src, &ipv6_hdr(skb)->saddr,
                               sizeof(dst->addr.ip6.src));

                if (!(hinfo->cfg.mode &
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ipv6_hdr(skb)->nexthdr;
                protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
                if ((int)protoff < 0)
                        return -1;
                break;
#endif
        default:
                BUG();
                return 0;
        }
        switch (nexthdr) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
                ports = skb_header_pointer(skb, protoff, sizeof(_ports),
                                           &_ports);
                break;
        default:
                _ports[0] = _ports[1] = 0;
                ports = _ports;
                break;
        }
        if (!ports)
                return -1;
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
                dst->src_port = ports[0];
        if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
                dst->dst_port = ports[1];
        return 0;
}
static bool
hashlimit_mt(const struct sk_buff *skb, const struct net_device *in,
             const struct net_device *out, const struct xt_match *match,
             const void *matchinfo, int offset, unsigned int protoff,
             bool *hotdrop)
{
        const struct xt_hashlimit_info *r =
                ((const struct xt_hashlimit_info *)matchinfo)->u.master;
        struct xt_hashlimit_htable *hinfo = r->hinfo;
        unsigned long now = jiffies;
        struct dsthash_ent *dh;
        struct dsthash_dst dst;
        if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
                goto hotdrop;

        spin_lock_bh(&hinfo->lock);
        dh = dsthash_find(hinfo, &dst);
        if (!dh) {
                dh = dsthash_alloc_init(hinfo, &dst);
                if (!dh) {
                        spin_unlock_bh(&hinfo->lock);
                        goto hotdrop;
                }

                dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
                dh->rateinfo.prev = jiffies;
                dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
                                                   hinfo->cfg.burst);
                dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
                                                       hinfo->cfg.burst);
                dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
        } else {
                /* update expiration timeout */
                dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
                rateinfo_recalc(dh, now);
        }

        if (dh->rateinfo.credit >= dh->rateinfo.cost) {
                /* We're underlimit. */
                dh->rateinfo.credit -= dh->rateinfo.cost;
                spin_unlock_bh(&hinfo->lock);
                return true;
        }

        spin_unlock_bh(&hinfo->lock);

        /* default case: we're overlimit, thus don't match */
        return false;

hotdrop:
        *hotdrop = true;
        return false;
}
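/*
 * Illustrative use of this match from userspace (iptables syntax):
 *
 *      iptables -A INPUT -p tcp --syn -m hashlimit \
 *              --hashlimit 5/second --hashlimit-burst 10 \
 *              --hashlimit-mode srcip --hashlimit-name ssh_conn \
 *              -j ACCEPT
 *
 * Each source address gets its own token bucket in the "ssh_conn"
 * table; the rule matches while that source stays under 5 new
 * connections per second (burst 10) and stops matching once its
 * per-source bucket is drained.
 */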
static bool
hashlimit_mt_check(const char *tablename, const void *inf,
                   const struct xt_match *match, void *matchinfo,
                   unsigned int hook_mask)
{
        struct xt_hashlimit_info *r = matchinfo;

        /* Check for overflow. */
        if (r->cfg.burst == 0 ||
            user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
                printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
                       r->cfg.avg, r->cfg.burst);
                return false;
        }
        if (r->cfg.mode == 0 ||
            r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
                           XT_HASHLIMIT_HASH_DIP |
                           XT_HASHLIMIT_HASH_SIP |
                           XT_HASHLIMIT_HASH_SPT))
                return false;
        if (!r->cfg.gc_interval)
                return false;
        if (!r->cfg.expire)
                return false;
        if (r->name[sizeof(r->name) - 1] != '\0')
                return false;

        /* This is the best we've got: We cannot release and re-grab lock,
         * since checkentry() is called before x_tables.c grabs xt_mutex.
         * We also cannot grab the hashtable spinlock, since htable_create will
         * call vmalloc, and that can sleep.  And we cannot just re-search
         * the list of htable's in htable_create(), since then we would
         * create duplicate proc files. -HW */
        mutex_lock(&hlimit_mutex);
        r->hinfo = htable_find_get(r->name, match->family);
        if (!r->hinfo && htable_create(r, match->family) != 0) {
                mutex_unlock(&hlimit_mutex);
                return false;
        }
        mutex_unlock(&hlimit_mutex);

        /* Ugly hack: For SMP, we only want to use one set */
        r->u.master = r;
        return true;
}
static void
hashlimit_mt_destroy(const struct xt_match *match, void *matchinfo)
{
        const struct xt_hashlimit_info *r = matchinfo;

        htable_put(r->hinfo);
}
#ifdef CONFIG_COMPAT
struct compat_xt_hashlimit_info {
        char name[IFNAMSIZ];
        struct hashlimit_cfg cfg;
        compat_uptr_t hinfo;
        compat_uptr_t master;
};

static void hashlimit_mt_compat_from_user(void *dst, void *src)
{
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

        memcpy(dst, src, off);
        memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
}

static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
{
        int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

        return copy_to_user(dst, src, off) ? -EFAULT : 0;
}
#endif
static struct xt_match hashlimit_mt_reg[] __read_mostly = {
        {
                .name           = "hashlimit",
                .family         = AF_INET,
                .match          = hashlimit_mt,
                .matchsize      = sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
                .compatsize     = sizeof(struct compat_xt_hashlimit_info),
                .compat_from_user = hashlimit_mt_compat_from_user,
                .compat_to_user = hashlimit_mt_compat_to_user,
#endif
                .checkentry     = hashlimit_mt_check,
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
        },
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        {
                .name           = "hashlimit",
                .family         = AF_INET6,
                .match          = hashlimit_mt,
                .matchsize      = sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
                .compatsize     = sizeof(struct compat_xt_hashlimit_info),
                .compat_from_user = hashlimit_mt_compat_from_user,
                .compat_to_user = hashlimit_mt_compat_to_user,
#endif
                .checkentry     = hashlimit_mt_check,
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
        },
#endif
};
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket;

        spin_lock_bh(&htable->lock);
        if (*pos >= htable->cfg.size)
                return NULL;

        bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
        if (!bucket)
                return ERR_PTR(-ENOMEM);

        *bucket = *pos;
        return bucket;
}
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;

        *pos = ++(*bucket);
        if (*pos >= htable->cfg.size) {
                kfree(v);
                return NULL;
        }
        return bucket;
}
static void dl_seq_stop(struct seq_file *s, void *v)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;

        kfree(bucket);
        spin_unlock_bh(&htable->lock);
}
static int dl_seq_real_show(struct dsthash_ent *ent, int family,
                            struct seq_file *s)
{
        /* recalculate to show accurate numbers */
        rateinfo_recalc(ent, jiffies);

        switch (family) {
        case AF_INET:
                return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
                                     "%u.%u.%u.%u:%u %u %u %u\n",
                                  (long)(ent->expires - jiffies)/HZ,
                                  NIPQUAD(ent->dst.addr.ip.src),
                                  ntohs(ent->dst.src_port),
                                  NIPQUAD(ent->dst.addr.ip.dst),
                                  ntohs(ent->dst.dst_port),
                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                  ent->rateinfo.cost);
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        case AF_INET6:
                return seq_printf(s, "%ld " NIP6_FMT ":%u->"
                                     NIP6_FMT ":%u %u %u %u\n",
                                  (long)(ent->expires - jiffies)/HZ,
                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src),
                                  ntohs(ent->dst.src_port),
                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst),
                                  ntohs(ent->dst.dst_port),
                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
                                  ent->rateinfo.cost);
#endif
        default:
                BUG();
                return 0;
        }
}
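/*
 * Illustrative /proc output produced by the AF_INET branch above, one
 * line per active entry in /proc/net/ipt_hashlimit/<name>:
 *
 *      9 192.168.1.10:0->0.0.0.0:0 6400 6400 6400
 *
 * i.e. seconds until the entry expires, source and destination
 * address:port as hashed (fields outside the configured mode stay
 * zero), then the current credit, credit_cap and cost.
 */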
static int dl_seq_show(struct seq_file *s, void *v)
{
        struct proc_dir_entry *pde = s->private;
        struct xt_hashlimit_htable *htable = pde->data;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
        struct hlist_node *pos;

        if (!hlist_empty(&htable->hash[*bucket])) {
                hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
                        if (dl_seq_real_show(ent, htable->family, s))
                                return 1;
        }
        return 0;
}
static const struct seq_operations dl_seq_ops = {
        .start = dl_seq_start,
        .next  = dl_seq_next,
        .stop  = dl_seq_stop,
        .show  = dl_seq_show
};
static int dl_proc_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &dl_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                sf->private = PDE(inode);
        }
        return ret;
}
static const struct file_operations dl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = dl_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};
hashlimit_mt_init(void)
749 err
= xt_register_matches(hashlimit_mt_reg
,
750 ARRAY_SIZE(hashlimit_mt_reg
));
755 hashlimit_cachep
= kmem_cache_create("xt_hashlimit",
756 sizeof(struct dsthash_ent
), 0, 0,
758 if (!hashlimit_cachep
) {
759 printk(KERN_ERR
"xt_hashlimit: unable to create slab cache\n");
762 hashlimit_procdir4
= proc_mkdir("ipt_hashlimit", init_net
.proc_net
);
763 if (!hashlimit_procdir4
) {
764 printk(KERN_ERR
"xt_hashlimit: unable to create proc dir "
769 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
770 hashlimit_procdir6
= proc_mkdir("ip6t_hashlimit", init_net
.proc_net
);
771 if (!hashlimit_procdir6
) {
772 printk(KERN_ERR
"xt_hashlimit: unable to create proc dir "
779 remove_proc_entry("ipt_hashlimit", init_net
.proc_net
);
781 kmem_cache_destroy(hashlimit_cachep
);
783 xt_unregister_matches(hashlimit_mt_reg
, ARRAY_SIZE(hashlimit_mt_reg
));
static void __exit hashlimit_mt_exit(void)
{
        remove_proc_entry("ipt_hashlimit", init_net.proc_net);
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
        remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
#endif
        kmem_cache_destroy(hashlimit_cachep);
        xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);