/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

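/*
 * Each address family owns one xt_af slot: the lists of registered
 * matches and targets below, guarded by the per-family mutex.
 */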
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

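/* Match registration mirrors the target registration helpers above. */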
int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

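/*
 * Like xt_find_match(), but tries to autoload the "<prefix>t_<name>"
 * extension module before retrying a failed lookup.
 */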
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

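/* Same revision walk as match_revfn(), but over the target list. */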
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

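/*
 * Render a hook bitmask as a "PREROUTING/INPUT/..." string for the
 * diagnostics printed by xt_check_match()/xt_check_target().
 */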
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

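/*
 * CONFIG_COMPAT: 32-bit userland lays out match/target blobs with
 * different alignment than a 64-bit kernel, so the per-family table
 * below records the offset deltas needed to translate rule blobs in
 * both directions.
 */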
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

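/*
 * Binary-search the (sorted) offset table for the cumulative delta
 * applying at @offset, used to fix up compat jump targets.
 */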
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif /* CONFIG_COMPAT */

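/*
 * Table blobs are kept per-CPU: allocations at most a page use
 * kmalloc_node() on the CPU's node, anything larger falls back to
 * vmalloc_node().
 */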
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

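/*
 * Per-CPU sequence counter bumped by the table traversal code around
 * rule evaluation, letting counter readers detect and retry across a
 * concurrent table replacement.
 */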
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

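/*
 * Swap a new table blob in under local_bh_disable(); the old blob is
 * handed back so the caller can harvest its counters before freeing.
 */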
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

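/*
 * /proc/net/<prefix>_tables_{names,matches,targets}: seq_file views of
 * the per-namespace table list and the registered extensions.
 */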
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

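/*
 * Advance the cursor through the NFPROTO_UNSPEC list first, then the
 * family-specific list, swapping the corresponding mutexes as the
 * traversal crosses from one list to the other.
 */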
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @ops:	nf_hook_ops array as returned by nf_hook_link
 * @hook_mask:	the very same mask that was passed to nf_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

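/* Create the per-family /proc entries for one network namespace. */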
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

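/* Per-namespace init: one table list per protocol family. */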
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);