/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
#define XT_PCPU_BLOCK_SIZE 4096
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);
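
/*
 * Usage sketch (illustrative only; "FOO", foo_tg and struct xt_foo_tginfo
 * are hypothetical): a target extension module typically registers a
 * static descriptor from its init function:
 *
 *	static struct xt_target foo_tg_reg __read_mostly = {
 *		.name       = "FOO",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.target     = foo_tg,
 *		.targetsize = sizeof(struct xt_foo_tginfo),
 *		.me         = THIS_MODULE,
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_target(&foo_tg_reg);
 *	}
 */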
void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);
void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
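
/*
 * Usage sketch (illustrative; the array and its fields are hypothetical):
 * a module providing several revisions of the same target registers them
 * in one call; partially completed registrations are rolled back
 * automatically by the goto path above on failure:
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{ .name = "FOO", .revision = 0, .family = NFPROTO_IPV4, ... },
 *		{ .name = "FOO", .revision = 1, .family = NFPROTO_IPV4, ... },
 *	};
 *
 *	return xt_register_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 */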
int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);
void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);
void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
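
/*
 * Usage sketch (illustrative): callers test the result with IS_ERR() and
 * drop the module reference once the match is no longer needed:
 *
 *	struct xt_match *m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	... use m ...
 *	module_put(m->me);
 */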
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);
static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{
	if (put_user(size, psize))
		return -EFAULT;
	if (copy_to_user(pname, name, strlen(name) + 1))
		return -EFAULT;
	if (put_user(rev, prev))
		return -EFAULT;

	return 0;
}

#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
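
/*
 * Usage note (descriptive, added for clarity): these helpers copy a rule's
 * match/target back to userspace when a ruleset is dumped.  Extensions that
 * keep kernel-only state at the end of their data set ->usersize smaller
 * than ->matchsize / ->targetsize; xt_data_to_user() then copies only the
 * user-visible part and zero-fills the aligned remainder, so kernel
 * pointers never leak to userspace.
 */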
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}
static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}
/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
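
/*
 * Worked example (illustrative): with revisions 0 and 1 of a match
 * registered, xt_find_revision(af, name, 3, 0, &err) returns 1 with
 * *err = -EPROTONOSUPPORT (the extension exists but revision 3 does not),
 * while a completely unknown name returns 0 with *err = -ENOENT so the
 * caller may try loading the module and probing again.
 */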
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
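
/*
 * Worked example (illustrative): if rules were recorded as (offset,
 * accumulated delta) pairs (0, 8), (112, 16), (208, 24), then a 32bit jump
 * target of 112 must be shifted by the delta accumulated *before* that
 * rule, i.e. tmp[mid - 1].delta == 8, which is exactly what the binary
 * search above returns.
 */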
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: number of entries in @offsets array
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
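
/*
 * Usage sketch (illustrative): the table translation code collects each
 * rule's start offset into a sorted array obtained from
 * xt_alloc_entry_offsets(); a verdict that jumps to byte offset v is then
 * accepted only if xt_find_jump_offset(offsets, v, nentries) is true, so
 * a jump can never land in the middle of a rule.
 */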
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
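
/*
 * Usage sketch (illustrative): the *_SO_SET_ADD_COUNTERS setsockopt
 * handlers use this as:
 *
 *	struct xt_counters_info tmp;
 *	void *paddc;
 *
 *	paddc = xt_copy_counters_from_user(arg, len, &tmp, compat);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	... add tmp.num_counters counters from paddc to the table ...
 *	vfree(paddc);
 */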
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	info = kvmalloc(sz, GFP_KERNEL);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
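
/*
 * Usage sketch (illustrative): get/put pairing for a table lookup; the
 * table is returned with the per-family mutex held and a module reference
 * taken:
 *
 *	struct xt_table *t = xt_find_table_lock(net, AF_INET, "filter");
 *	if (!t)
 *		return -ENOENT;
 *	... read t->private under the mutex ...
 *	xt_table_unlock(t);
 *	module_put(t->me);
 */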
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
			cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
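
/*
 * Usage sketch (illustrative): __do_replace() swaps in the new ruleset and
 * receives the old one back, so its counters can be harvested and its
 * memory freed:
 *
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto put_module;
 *	get_counters(oldinfo, counters);
 *	xt_free_table_info(oldinfo);
 */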
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */
/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
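
/*
 * Usage sketch (illustrative): a table module allocates its hook ops once
 * and registers them per net namespace; one nf_hook_ops entry is produced
 * per bit set in table->valid_hooks:
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	err = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 */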
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
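
/*
 * Usage sketch (illustrative): the entry-check code carries one allocation
 * state across all rules of a table so counters pack into shared 4kb
 * percpu blocks:
 *
 *	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 *
 *	if (!xt_percpu_counter_alloc(&alloc_state, &e->counters))
 *		return -ENOMEM;
 *
 * At evaluation time, xt_get_this_cpu_counter(&e->counters) resolves the
 * address of this cpu's copy.
 */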
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);