5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * For reading or updating the counters, the user context needs to
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48 COUNTER_OFFSET(n) * cpu))
/* Serialises table registration, replacement and counter updates. */
static DEFINE_MUTEX(ebt_mutex);
55 static void ebt_standard_compat_from_user(void *dst
, const void *src
)
57 int v
= *(compat_int_t
*)src
;
60 v
+= xt_compat_calc_jump(NFPROTO_BRIDGE
, v
);
61 memcpy(dst
, &v
, sizeof(v
));
64 static int ebt_standard_compat_to_user(void __user
*dst
, const void *src
)
66 compat_int_t cv
= *(int *)src
;
69 cv
-= xt_compat_calc_jump(NFPROTO_BRIDGE
, cv
);
70 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
75 static struct xt_target ebt_standard_target
= {
78 .family
= NFPROTO_BRIDGE
,
79 .targetsize
= sizeof(int),
81 .compatsize
= sizeof(compat_int_t
),
82 .compat_from_user
= ebt_standard_compat_from_user
,
83 .compat_to_user
= ebt_standard_compat_to_user
,
88 ebt_do_watcher(const struct ebt_entry_watcher
*w
, struct sk_buff
*skb
,
89 struct xt_action_param
*par
)
91 par
->target
= w
->u
.watcher
;
92 par
->targinfo
= w
->data
;
93 w
->u
.watcher
->target(skb
, par
);
94 /* watchers don't give a verdict */
99 ebt_do_match(struct ebt_entry_match
*m
, const struct sk_buff
*skb
,
100 struct xt_action_param
*par
)
102 par
->match
= m
->u
.match
;
103 par
->matchinfo
= m
->data
;
104 return m
->u
.match
->match(skb
, par
) ? EBT_MATCH
: EBT_NOMATCH
;
108 ebt_dev_check(const char *entry
, const struct net_device
*device
)
117 devname
= device
->name
;
118 /* 1 is the wildcard token */
119 while (entry
[i
] != '\0' && entry
[i
] != 1 && entry
[i
] == devname
[i
])
121 return (devname
[i
] != entry
[i
] && entry
[i
] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
127 ebt_basic_match(const struct ebt_entry
*e
, const struct ethhdr
*h
,
128 const struct net_device
*in
, const struct net_device
*out
)
132 if (e
->bitmask
& EBT_802_3
) {
133 if (FWINV2(ntohs(h
->h_proto
) >= 1536, EBT_IPROTO
))
135 } else if (!(e
->bitmask
& EBT_NOPROTO
) &&
136 FWINV2(e
->ethproto
!= h
->h_proto
, EBT_IPROTO
))
139 if (FWINV2(ebt_dev_check(e
->in
, in
), EBT_IIN
))
141 if (FWINV2(ebt_dev_check(e
->out
, out
), EBT_IOUT
))
143 if ((!in
|| !in
->br_port
) ? 0 : FWINV2(ebt_dev_check(
144 e
->logical_in
, in
->br_port
->br
->dev
), EBT_ILOGICALIN
))
146 if ((!out
|| !out
->br_port
) ? 0 : FWINV2(ebt_dev_check(
147 e
->logical_out
, out
->br_port
->br
->dev
), EBT_ILOGICALOUT
))
150 if (e
->bitmask
& EBT_SOURCEMAC
) {
152 for (i
= 0; i
< 6; i
++)
153 verdict
|= (h
->h_source
[i
] ^ e
->sourcemac
[i
]) &
155 if (FWINV2(verdict
!= 0, EBT_ISOURCE
) )
158 if (e
->bitmask
& EBT_DESTMAC
) {
160 for (i
= 0; i
< 6; i
++)
161 verdict
|= (h
->h_dest
[i
] ^ e
->destmac
[i
]) &
163 if (FWINV2(verdict
!= 0, EBT_IDEST
) )
170 struct ebt_entry
*ebt_next_entry(const struct ebt_entry
*entry
)
172 return (void *)entry
+ entry
->next_offset
;
175 /* Do some firewalling */
176 unsigned int ebt_do_table (unsigned int hook
, struct sk_buff
*skb
,
177 const struct net_device
*in
, const struct net_device
*out
,
178 struct ebt_table
*table
)
181 struct ebt_entry
*point
;
182 struct ebt_counter
*counter_base
, *cb_base
;
183 const struct ebt_entry_target
*t
;
185 struct ebt_chainstack
*cs
;
186 struct ebt_entries
*chaininfo
;
188 const struct ebt_table_info
*private;
189 struct xt_action_param acpar
;
191 acpar
.family
= NFPROTO_BRIDGE
;
194 acpar
.hotdrop
= false;
195 acpar
.hooknum
= hook
;
197 read_lock_bh(&table
->lock
);
198 private = table
->private;
199 cb_base
= COUNTER_BASE(private->counters
, private->nentries
,
201 if (private->chainstack
)
202 cs
= private->chainstack
[smp_processor_id()];
205 chaininfo
= private->hook_entry
[hook
];
206 nentries
= private->hook_entry
[hook
]->nentries
;
207 point
= (struct ebt_entry
*)(private->hook_entry
[hook
]->data
);
208 counter_base
= cb_base
+ private->hook_entry
[hook
]->counter_offset
;
209 /* base for chain jumps */
210 base
= private->entries
;
212 while (i
< nentries
) {
213 if (ebt_basic_match(point
, eth_hdr(skb
), in
, out
))
216 if (EBT_MATCH_ITERATE(point
, ebt_do_match
, skb
, &acpar
) != 0)
219 read_unlock_bh(&table
->lock
);
223 /* increase counter */
224 (*(counter_base
+ i
)).pcnt
++;
225 (*(counter_base
+ i
)).bcnt
+= skb
->len
;
227 /* these should only watch: not modify, nor tell us
228 what to do with the packet */
229 EBT_WATCHER_ITERATE(point
, ebt_do_watcher
, skb
, &acpar
);
231 t
= (struct ebt_entry_target
*)
232 (((char *)point
) + point
->target_offset
);
233 /* standard target */
234 if (!t
->u
.target
->target
)
235 verdict
= ((struct ebt_standard_target
*)t
)->verdict
;
237 acpar
.target
= t
->u
.target
;
238 acpar
.targinfo
= t
->data
;
239 verdict
= t
->u
.target
->target(skb
, &acpar
);
241 if (verdict
== EBT_ACCEPT
) {
242 read_unlock_bh(&table
->lock
);
245 if (verdict
== EBT_DROP
) {
246 read_unlock_bh(&table
->lock
);
249 if (verdict
== EBT_RETURN
) {
251 #ifdef CONFIG_NETFILTER_DEBUG
253 BUGPRINT("RETURN on base chain");
254 /* act like this is EBT_CONTINUE */
259 /* put all the local variables right */
261 chaininfo
= cs
[sp
].chaininfo
;
262 nentries
= chaininfo
->nentries
;
264 counter_base
= cb_base
+
265 chaininfo
->counter_offset
;
268 if (verdict
== EBT_CONTINUE
)
270 #ifdef CONFIG_NETFILTER_DEBUG
272 BUGPRINT("bogus standard verdict\n");
273 read_unlock_bh(&table
->lock
);
279 cs
[sp
].chaininfo
= chaininfo
;
280 cs
[sp
].e
= ebt_next_entry(point
);
282 chaininfo
= (struct ebt_entries
*) (base
+ verdict
);
283 #ifdef CONFIG_NETFILTER_DEBUG
284 if (chaininfo
->distinguisher
) {
285 BUGPRINT("jump to non-chain\n");
286 read_unlock_bh(&table
->lock
);
290 nentries
= chaininfo
->nentries
;
291 point
= (struct ebt_entry
*)chaininfo
->data
;
292 counter_base
= cb_base
+ chaininfo
->counter_offset
;
296 point
= ebt_next_entry(point
);
300 /* I actually like this :) */
301 if (chaininfo
->policy
== EBT_RETURN
)
303 if (chaininfo
->policy
== EBT_ACCEPT
) {
304 read_unlock_bh(&table
->lock
);
307 read_unlock_bh(&table
->lock
);
311 /* If it succeeds, returns element and locks mutex */
313 find_inlist_lock_noload(struct list_head
*head
, const char *name
, int *error
,
317 struct list_head list
;
318 char name
[EBT_FUNCTION_MAXNAMELEN
];
321 *error
= mutex_lock_interruptible(mutex
);
325 list_for_each_entry(e
, head
, list
) {
326 if (strcmp(e
->name
, name
) == 0)
/*
 * Like find_inlist_lock_noload(), but when the element is absent, try to
 * load the module "<prefix><name>" and search again.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
343 static inline struct ebt_table
*
344 find_table_lock(struct net
*net
, const char *name
, int *error
,
347 return find_inlist_lock(&net
->xt
.tables
[NFPROTO_BRIDGE
], name
,
348 "ebtable_", error
, mutex
);
352 ebt_check_match(struct ebt_entry_match
*m
, struct xt_mtchk_param
*par
,
355 const struct ebt_entry
*e
= par
->entryinfo
;
356 struct xt_match
*match
;
357 size_t left
= ((char *)e
+ e
->watchers_offset
) - (char *)m
;
360 if (left
< sizeof(struct ebt_entry_match
) ||
361 left
- sizeof(struct ebt_entry_match
) < m
->match_size
)
364 match
= xt_request_find_match(NFPROTO_BRIDGE
, m
->u
.name
, 0);
366 return PTR_ERR(match
);
370 par
->matchinfo
= m
->data
;
371 ret
= xt_check_match(par
, m
->match_size
,
372 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
374 module_put(match
->me
);
383 ebt_check_watcher(struct ebt_entry_watcher
*w
, struct xt_tgchk_param
*par
,
386 const struct ebt_entry
*e
= par
->entryinfo
;
387 struct xt_target
*watcher
;
388 size_t left
= ((char *)e
+ e
->target_offset
) - (char *)w
;
391 if (left
< sizeof(struct ebt_entry_watcher
) ||
392 left
- sizeof(struct ebt_entry_watcher
) < w
->watcher_size
)
395 watcher
= xt_request_find_target(NFPROTO_BRIDGE
, w
->u
.name
, 0);
397 return PTR_ERR(watcher
);
398 w
->u
.watcher
= watcher
;
400 par
->target
= watcher
;
401 par
->targinfo
= w
->data
;
402 ret
= xt_check_target(par
, w
->watcher_size
,
403 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
405 module_put(watcher
->me
);
413 static int ebt_verify_pointers(const struct ebt_replace
*repl
,
414 struct ebt_table_info
*newinfo
)
416 unsigned int limit
= repl
->entries_size
;
417 unsigned int valid_hooks
= repl
->valid_hooks
;
418 unsigned int offset
= 0;
421 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++)
422 newinfo
->hook_entry
[i
] = NULL
;
424 newinfo
->entries_size
= repl
->entries_size
;
425 newinfo
->nentries
= repl
->nentries
;
427 while (offset
< limit
) {
428 size_t left
= limit
- offset
;
429 struct ebt_entry
*e
= (void *)newinfo
->entries
+ offset
;
431 if (left
< sizeof(unsigned int))
434 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
435 if ((valid_hooks
& (1 << i
)) == 0)
437 if ((char __user
*)repl
->hook_entry
[i
] ==
438 repl
->entries
+ offset
)
442 if (i
!= NF_BR_NUMHOOKS
|| !(e
->bitmask
& EBT_ENTRY_OR_ENTRIES
)) {
443 if (e
->bitmask
!= 0) {
444 /* we make userspace set this right,
445 so there is no misunderstanding */
446 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
447 "in distinguisher\n");
450 if (i
!= NF_BR_NUMHOOKS
)
451 newinfo
->hook_entry
[i
] = (struct ebt_entries
*)e
;
452 if (left
< sizeof(struct ebt_entries
))
454 offset
+= sizeof(struct ebt_entries
);
456 if (left
< sizeof(struct ebt_entry
))
458 if (left
< e
->next_offset
)
460 if (e
->next_offset
< sizeof(struct ebt_entry
))
462 offset
+= e
->next_offset
;
465 if (offset
!= limit
) {
466 BUGPRINT("entries_size too small\n");
470 /* check if all valid hooks have a chain */
471 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
472 if (!newinfo
->hook_entry
[i
] &&
473 (valid_hooks
& (1 << i
))) {
474 BUGPRINT("Valid hook without chain\n");
482 * this one is very careful, as it is the first function
483 * to parse the userspace data
486 ebt_check_entry_size_and_hooks(const struct ebt_entry
*e
,
487 const struct ebt_table_info
*newinfo
,
488 unsigned int *n
, unsigned int *cnt
,
489 unsigned int *totalcnt
, unsigned int *udc_cnt
)
493 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
494 if ((void *)e
== (void *)newinfo
->hook_entry
[i
])
497 /* beginning of a new chain
498 if i == NF_BR_NUMHOOKS it must be a user defined chain */
499 if (i
!= NF_BR_NUMHOOKS
|| !e
->bitmask
) {
500 /* this checks if the previous chain has as many entries
503 BUGPRINT("nentries does not equal the nr of entries "
507 if (((struct ebt_entries
*)e
)->policy
!= EBT_DROP
&&
508 ((struct ebt_entries
*)e
)->policy
!= EBT_ACCEPT
) {
509 /* only RETURN from udc */
510 if (i
!= NF_BR_NUMHOOKS
||
511 ((struct ebt_entries
*)e
)->policy
!= EBT_RETURN
) {
512 BUGPRINT("bad policy\n");
516 if (i
== NF_BR_NUMHOOKS
) /* it's a user defined chain */
518 if (((struct ebt_entries
*)e
)->counter_offset
!= *totalcnt
) {
519 BUGPRINT("counter_offset != totalcnt");
522 *n
= ((struct ebt_entries
*)e
)->nentries
;
526 /* a plain old entry, heh */
527 if (sizeof(struct ebt_entry
) > e
->watchers_offset
||
528 e
->watchers_offset
> e
->target_offset
||
529 e
->target_offset
>= e
->next_offset
) {
530 BUGPRINT("entry offsets not in right order\n");
533 /* this is not checked anywhere else */
534 if (e
->next_offset
- e
->target_offset
< sizeof(struct ebt_entry_target
)) {
535 BUGPRINT("target size too small\n");
545 struct ebt_chainstack cs
;
547 unsigned int hookmask
;
551 * we need these positions to check that the jumps to a different part of the
552 * entries is a jump to the beginning of a new chain.
555 ebt_get_udc_positions(struct ebt_entry
*e
, struct ebt_table_info
*newinfo
,
556 unsigned int *n
, struct ebt_cl_stack
*udc
)
560 /* we're only interested in chain starts */
563 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
564 if (newinfo
->hook_entry
[i
] == (struct ebt_entries
*)e
)
567 /* only care about udc */
568 if (i
!= NF_BR_NUMHOOKS
)
571 udc
[*n
].cs
.chaininfo
= (struct ebt_entries
*)e
;
572 /* these initialisations are depended on later in check_chainloops() */
574 udc
[*n
].hookmask
= 0;
581 ebt_cleanup_match(struct ebt_entry_match
*m
, struct net
*net
, unsigned int *i
)
583 struct xt_mtdtor_param par
;
585 if (i
&& (*i
)-- == 0)
589 par
.match
= m
->u
.match
;
590 par
.matchinfo
= m
->data
;
591 par
.family
= NFPROTO_BRIDGE
;
592 if (par
.match
->destroy
!= NULL
)
593 par
.match
->destroy(&par
);
594 module_put(par
.match
->me
);
599 ebt_cleanup_watcher(struct ebt_entry_watcher
*w
, struct net
*net
, unsigned int *i
)
601 struct xt_tgdtor_param par
;
603 if (i
&& (*i
)-- == 0)
607 par
.target
= w
->u
.watcher
;
608 par
.targinfo
= w
->data
;
609 par
.family
= NFPROTO_BRIDGE
;
610 if (par
.target
->destroy
!= NULL
)
611 par
.target
->destroy(&par
);
612 module_put(par
.target
->me
);
617 ebt_cleanup_entry(struct ebt_entry
*e
, struct net
*net
, unsigned int *cnt
)
619 struct xt_tgdtor_param par
;
620 struct ebt_entry_target
*t
;
625 if (cnt
&& (*cnt
)-- == 0)
627 EBT_WATCHER_ITERATE(e
, ebt_cleanup_watcher
, net
, NULL
);
628 EBT_MATCH_ITERATE(e
, ebt_cleanup_match
, net
, NULL
);
629 t
= (struct ebt_entry_target
*)(((char *)e
) + e
->target_offset
);
632 par
.target
= t
->u
.target
;
633 par
.targinfo
= t
->data
;
634 par
.family
= NFPROTO_BRIDGE
;
635 if (par
.target
->destroy
!= NULL
)
636 par
.target
->destroy(&par
);
637 module_put(par
.target
->me
);
642 ebt_check_entry(struct ebt_entry
*e
, struct net
*net
,
643 const struct ebt_table_info
*newinfo
,
644 const char *name
, unsigned int *cnt
,
645 struct ebt_cl_stack
*cl_s
, unsigned int udc_cnt
)
647 struct ebt_entry_target
*t
;
648 struct xt_target
*target
;
649 unsigned int i
, j
, hook
= 0, hookmask
= 0;
652 struct xt_mtchk_param mtpar
;
653 struct xt_tgchk_param tgpar
;
655 /* don't mess with the struct ebt_entries */
659 if (e
->bitmask
& ~EBT_F_MASK
) {
660 BUGPRINT("Unknown flag for bitmask\n");
663 if (e
->invflags
& ~EBT_INV_MASK
) {
664 BUGPRINT("Unknown flag for inv bitmask\n");
667 if ( (e
->bitmask
& EBT_NOPROTO
) && (e
->bitmask
& EBT_802_3
) ) {
668 BUGPRINT("NOPROTO & 802_3 not allowed\n");
671 /* what hook do we belong to? */
672 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
673 if (!newinfo
->hook_entry
[i
])
675 if ((char *)newinfo
->hook_entry
[i
] < (char *)e
)
680 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
682 if (i
< NF_BR_NUMHOOKS
)
683 hookmask
= (1 << hook
) | (1 << NF_BR_NUMHOOKS
);
685 for (i
= 0; i
< udc_cnt
; i
++)
686 if ((char *)(cl_s
[i
].cs
.chaininfo
) > (char *)e
)
689 hookmask
= (1 << hook
) | (1 << NF_BR_NUMHOOKS
);
691 hookmask
= cl_s
[i
- 1].hookmask
;
695 mtpar
.net
= tgpar
.net
= net
;
696 mtpar
.table
= tgpar
.table
= name
;
697 mtpar
.entryinfo
= tgpar
.entryinfo
= e
;
698 mtpar
.hook_mask
= tgpar
.hook_mask
= hookmask
;
699 mtpar
.family
= tgpar
.family
= NFPROTO_BRIDGE
;
700 ret
= EBT_MATCH_ITERATE(e
, ebt_check_match
, &mtpar
, &i
);
702 goto cleanup_matches
;
704 ret
= EBT_WATCHER_ITERATE(e
, ebt_check_watcher
, &tgpar
, &j
);
706 goto cleanup_watchers
;
707 t
= (struct ebt_entry_target
*)(((char *)e
) + e
->target_offset
);
708 gap
= e
->next_offset
- e
->target_offset
;
710 target
= xt_request_find_target(NFPROTO_BRIDGE
, t
->u
.name
, 0);
711 if (IS_ERR(target
)) {
712 ret
= PTR_ERR(target
);
713 goto cleanup_watchers
;
716 t
->u
.target
= target
;
717 if (t
->u
.target
== &ebt_standard_target
) {
718 if (gap
< sizeof(struct ebt_standard_target
)) {
719 BUGPRINT("Standard target size too big\n");
721 goto cleanup_watchers
;
723 if (((struct ebt_standard_target
*)t
)->verdict
<
724 -NUM_STANDARD_TARGETS
) {
725 BUGPRINT("Invalid standard target\n");
727 goto cleanup_watchers
;
729 } else if (t
->target_size
> gap
- sizeof(struct ebt_entry_target
)) {
730 module_put(t
->u
.target
->me
);
732 goto cleanup_watchers
;
735 tgpar
.target
= target
;
736 tgpar
.targinfo
= t
->data
;
737 ret
= xt_check_target(&tgpar
, t
->target_size
,
738 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
740 module_put(target
->me
);
741 goto cleanup_watchers
;
746 EBT_WATCHER_ITERATE(e
, ebt_cleanup_watcher
, net
, &j
);
748 EBT_MATCH_ITERATE(e
, ebt_cleanup_match
, net
, &i
);
753 * checks for loops and sets the hook mask for udc
754 * the hook mask for udc tells us from which base chains the udc can be
755 * accessed. This mask is a parameter to the check() functions of the extensions
757 static int check_chainloops(const struct ebt_entries
*chain
, struct ebt_cl_stack
*cl_s
,
758 unsigned int udc_cnt
, unsigned int hooknr
, char *base
)
760 int i
, chain_nr
= -1, pos
= 0, nentries
= chain
->nentries
, verdict
;
761 const struct ebt_entry
*e
= (struct ebt_entry
*)chain
->data
;
762 const struct ebt_entry_target
*t
;
764 while (pos
< nentries
|| chain_nr
!= -1) {
765 /* end of udc, go back one 'recursion' step */
766 if (pos
== nentries
) {
767 /* put back values of the time when this chain was called */
768 e
= cl_s
[chain_nr
].cs
.e
;
769 if (cl_s
[chain_nr
].from
!= -1)
771 cl_s
[cl_s
[chain_nr
].from
].cs
.chaininfo
->nentries
;
773 nentries
= chain
->nentries
;
774 pos
= cl_s
[chain_nr
].cs
.n
;
775 /* make sure we won't see a loop that isn't one */
776 cl_s
[chain_nr
].cs
.n
= 0;
777 chain_nr
= cl_s
[chain_nr
].from
;
781 t
= (struct ebt_entry_target
*)
782 (((char *)e
) + e
->target_offset
);
783 if (strcmp(t
->u
.name
, EBT_STANDARD_TARGET
))
785 if (e
->target_offset
+ sizeof(struct ebt_standard_target
) >
787 BUGPRINT("Standard target size too big\n");
790 verdict
= ((struct ebt_standard_target
*)t
)->verdict
;
791 if (verdict
>= 0) { /* jump to another chain */
792 struct ebt_entries
*hlp2
=
793 (struct ebt_entries
*)(base
+ verdict
);
794 for (i
= 0; i
< udc_cnt
; i
++)
795 if (hlp2
== cl_s
[i
].cs
.chaininfo
)
797 /* bad destination or loop */
799 BUGPRINT("bad destination\n");
806 if (cl_s
[i
].hookmask
& (1 << hooknr
))
808 /* this can't be 0, so the loop test is correct */
809 cl_s
[i
].cs
.n
= pos
+ 1;
811 cl_s
[i
].cs
.e
= ebt_next_entry(e
);
812 e
= (struct ebt_entry
*)(hlp2
->data
);
813 nentries
= hlp2
->nentries
;
814 cl_s
[i
].from
= chain_nr
;
816 /* this udc is accessible from the base chain for hooknr */
817 cl_s
[i
].hookmask
|= (1 << hooknr
);
821 e
= ebt_next_entry(e
);
827 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
828 static int translate_table(struct net
*net
, const char *name
,
829 struct ebt_table_info
*newinfo
)
831 unsigned int i
, j
, k
, udc_cnt
;
833 struct ebt_cl_stack
*cl_s
= NULL
; /* used in the checking for chain loops */
836 while (i
< NF_BR_NUMHOOKS
&& !newinfo
->hook_entry
[i
])
838 if (i
== NF_BR_NUMHOOKS
) {
839 BUGPRINT("No valid hooks specified\n");
842 if (newinfo
->hook_entry
[i
] != (struct ebt_entries
*)newinfo
->entries
) {
843 BUGPRINT("Chains don't start at beginning\n");
846 /* make sure chains are ordered after each other in same order
847 as their corresponding hooks */
848 for (j
= i
+ 1; j
< NF_BR_NUMHOOKS
; j
++) {
849 if (!newinfo
->hook_entry
[j
])
851 if (newinfo
->hook_entry
[j
] <= newinfo
->hook_entry
[i
]) {
852 BUGPRINT("Hook order must be followed\n");
858 /* do some early checkings and initialize some things */
859 i
= 0; /* holds the expected nr. of entries for the chain */
860 j
= 0; /* holds the up to now counted entries for the chain */
861 k
= 0; /* holds the total nr. of entries, should equal
862 newinfo->nentries afterwards */
863 udc_cnt
= 0; /* will hold the nr. of user defined chains (udc) */
864 ret
= EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
865 ebt_check_entry_size_and_hooks
, newinfo
,
866 &i
, &j
, &k
, &udc_cnt
);
872 BUGPRINT("nentries does not equal the nr of entries in the "
876 if (k
!= newinfo
->nentries
) {
877 BUGPRINT("Total nentries is wrong\n");
881 /* get the location of the udc, put them in an array
882 while we're at it, allocate the chainstack */
884 /* this will get free'd in do_replace()/ebt_register_table()
885 if an error occurs */
886 newinfo
->chainstack
=
887 vmalloc(nr_cpu_ids
* sizeof(*(newinfo
->chainstack
)));
888 if (!newinfo
->chainstack
)
890 for_each_possible_cpu(i
) {
891 newinfo
->chainstack
[i
] =
892 vmalloc(udc_cnt
* sizeof(*(newinfo
->chainstack
[0])));
893 if (!newinfo
->chainstack
[i
]) {
895 vfree(newinfo
->chainstack
[--i
]);
896 vfree(newinfo
->chainstack
);
897 newinfo
->chainstack
= NULL
;
902 cl_s
= vmalloc(udc_cnt
* sizeof(*cl_s
));
905 i
= 0; /* the i'th udc */
906 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
907 ebt_get_udc_positions
, newinfo
, &i
, cl_s
);
910 BUGPRINT("i != udc_cnt\n");
916 /* Check for loops */
917 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++)
918 if (newinfo
->hook_entry
[i
])
919 if (check_chainloops(newinfo
->hook_entry
[i
],
920 cl_s
, udc_cnt
, i
, newinfo
->entries
)) {
925 /* we now know the following (along with E=mc²):
926 - the nr of entries in each chain is right
927 - the size of the allocated space is right
928 - all valid hooks have a corresponding chain
930 - wrong data can still be on the level of a single entry
931 - could be there are jumps to places that are not the
932 beginning of a chain. This can only occur in chains that
933 are not accessible from any base chains, so we don't care. */
935 /* used to know what we need to clean up if something goes wrong */
937 ret
= EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
938 ebt_check_entry
, net
, newinfo
, name
, &i
, cl_s
, udc_cnt
);
940 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
941 ebt_cleanup_entry
, net
, &i
);
947 /* called under write_lock */
948 static void get_counters(const struct ebt_counter
*oldcounters
,
949 struct ebt_counter
*counters
, unsigned int nentries
)
952 struct ebt_counter
*counter_base
;
954 /* counters of cpu 0 */
955 memcpy(counters
, oldcounters
,
956 sizeof(struct ebt_counter
) * nentries
);
958 /* add other counters to those of cpu 0 */
959 for_each_possible_cpu(cpu
) {
962 counter_base
= COUNTER_BASE(oldcounters
, nentries
, cpu
);
963 for (i
= 0; i
< nentries
; i
++) {
964 counters
[i
].pcnt
+= counter_base
[i
].pcnt
;
965 counters
[i
].bcnt
+= counter_base
[i
].bcnt
;
970 static int do_replace_finish(struct net
*net
, struct ebt_replace
*repl
,
971 struct ebt_table_info
*newinfo
)
974 struct ebt_counter
*counterstmp
= NULL
;
975 /* used to be able to unlock earlier */
976 struct ebt_table_info
*table
;
979 /* the user wants counters back
980 the check on the size is done later, when we have the lock */
981 if (repl
->num_counters
) {
982 unsigned long size
= repl
->num_counters
* sizeof(*counterstmp
);
983 counterstmp
= vmalloc(size
);
988 newinfo
->chainstack
= NULL
;
989 ret
= ebt_verify_pointers(repl
, newinfo
);
991 goto free_counterstmp
;
993 ret
= translate_table(net
, repl
->name
, newinfo
);
996 goto free_counterstmp
;
998 t
= find_table_lock(net
, repl
->name
, &ret
, &ebt_mutex
);
1004 /* the table doesn't like it */
1005 if (t
->check
&& (ret
= t
->check(newinfo
, repl
->valid_hooks
)))
1008 if (repl
->num_counters
&& repl
->num_counters
!= t
->private->nentries
) {
1009 BUGPRINT("Wrong nr. of counters requested\n");
1014 /* we have the mutex lock, so no danger in reading this pointer */
1016 /* make sure the table can only be rmmod'ed if it contains no rules */
1017 if (!table
->nentries
&& newinfo
->nentries
&& !try_module_get(t
->me
)) {
1020 } else if (table
->nentries
&& !newinfo
->nentries
)
1022 /* we need an atomic snapshot of the counters */
1023 write_lock_bh(&t
->lock
);
1024 if (repl
->num_counters
)
1025 get_counters(t
->private->counters
, counterstmp
,
1026 t
->private->nentries
);
1028 t
->private = newinfo
;
1029 write_unlock_bh(&t
->lock
);
1030 mutex_unlock(&ebt_mutex
);
1031 /* so, a user can change the chains while having messed up her counter
1032 allocation. Only reason why this is done is because this way the lock
1033 is held only once, while this doesn't bring the kernel into a
1035 if (repl
->num_counters
&&
1036 copy_to_user(repl
->counters
, counterstmp
,
1037 repl
->num_counters
* sizeof(struct ebt_counter
))) {
1043 /* decrease module count and free resources */
1044 EBT_ENTRY_ITERATE(table
->entries
, table
->entries_size
,
1045 ebt_cleanup_entry
, net
, NULL
);
1047 vfree(table
->entries
);
1048 if (table
->chainstack
) {
1049 for_each_possible_cpu(i
)
1050 vfree(table
->chainstack
[i
]);
1051 vfree(table
->chainstack
);
1059 mutex_unlock(&ebt_mutex
);
1061 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
1062 ebt_cleanup_entry
, net
, NULL
);
1065 /* can be initialized in translate_table() */
1066 if (newinfo
->chainstack
) {
1067 for_each_possible_cpu(i
)
1068 vfree(newinfo
->chainstack
[i
]);
1069 vfree(newinfo
->chainstack
);
1074 /* replace the table */
1075 static int do_replace(struct net
*net
, const void __user
*user
,
1078 int ret
, countersize
;
1079 struct ebt_table_info
*newinfo
;
1080 struct ebt_replace tmp
;
1082 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1085 if (len
!= sizeof(tmp
) + tmp
.entries_size
) {
1086 BUGPRINT("Wrong len argument\n");
1090 if (tmp
.entries_size
== 0) {
1091 BUGPRINT("Entries_size never zero\n");
1094 /* overflow check */
1095 if (tmp
.nentries
>= ((INT_MAX
- sizeof(struct ebt_table_info
)) /
1096 NR_CPUS
- SMP_CACHE_BYTES
) / sizeof(struct ebt_counter
))
1098 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct ebt_counter
))
1101 countersize
= COUNTER_OFFSET(tmp
.nentries
) * nr_cpu_ids
;
1102 newinfo
= vmalloc(sizeof(*newinfo
) + countersize
);
1107 memset(newinfo
->counters
, 0, countersize
);
1109 newinfo
->entries
= vmalloc(tmp
.entries_size
);
1110 if (!newinfo
->entries
) {
1115 newinfo
->entries
, tmp
.entries
, tmp
.entries_size
) != 0) {
1116 BUGPRINT("Couldn't copy entries from userspace\n");
1121 ret
= do_replace_finish(net
, &tmp
, newinfo
);
1125 vfree(newinfo
->entries
);
1132 ebt_register_table(struct net
*net
, const struct ebt_table
*input_table
)
1134 struct ebt_table_info
*newinfo
;
1135 struct ebt_table
*t
, *table
;
1136 struct ebt_replace_kernel
*repl
;
1137 int ret
, i
, countersize
;
1140 if (input_table
== NULL
|| (repl
= input_table
->table
) == NULL
||
1141 repl
->entries
== 0 || repl
->entries_size
== 0 ||
1142 repl
->counters
!= NULL
|| input_table
->private != NULL
) {
1143 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1144 return ERR_PTR(-EINVAL
);
1147 /* Don't add one table to multiple lists. */
1148 table
= kmemdup(input_table
, sizeof(struct ebt_table
), GFP_KERNEL
);
1154 countersize
= COUNTER_OFFSET(repl
->nentries
) * nr_cpu_ids
;
1155 newinfo
= vmalloc(sizeof(*newinfo
) + countersize
);
1160 p
= vmalloc(repl
->entries_size
);
1164 memcpy(p
, repl
->entries
, repl
->entries_size
);
1165 newinfo
->entries
= p
;
1167 newinfo
->entries_size
= repl
->entries_size
;
1168 newinfo
->nentries
= repl
->nentries
;
1171 memset(newinfo
->counters
, 0, countersize
);
1173 /* fill in newinfo and parse the entries */
1174 newinfo
->chainstack
= NULL
;
1175 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
1176 if ((repl
->valid_hooks
& (1 << i
)) == 0)
1177 newinfo
->hook_entry
[i
] = NULL
;
1179 newinfo
->hook_entry
[i
] = p
+
1180 ((char *)repl
->hook_entry
[i
] - repl
->entries
);
1182 ret
= translate_table(net
, repl
->name
, newinfo
);
1184 BUGPRINT("Translate_table failed\n");
1185 goto free_chainstack
;
1188 if (table
->check
&& table
->check(newinfo
, table
->valid_hooks
)) {
1189 BUGPRINT("The table doesn't like its own initial data, lol\n");
1190 return ERR_PTR(-EINVAL
);
1193 table
->private = newinfo
;
1194 rwlock_init(&table
->lock
);
1195 ret
= mutex_lock_interruptible(&ebt_mutex
);
1197 goto free_chainstack
;
1199 list_for_each_entry(t
, &net
->xt
.tables
[NFPROTO_BRIDGE
], list
) {
1200 if (strcmp(t
->name
, table
->name
) == 0) {
1202 BUGPRINT("Table name already exists\n");
1207 /* Hold a reference count if the chains aren't empty */
1208 if (newinfo
->nentries
&& !try_module_get(table
->me
)) {
1212 list_add(&table
->list
, &net
->xt
.tables
[NFPROTO_BRIDGE
]);
1213 mutex_unlock(&ebt_mutex
);
1216 mutex_unlock(&ebt_mutex
);
1218 if (newinfo
->chainstack
) {
1219 for_each_possible_cpu(i
)
1220 vfree(newinfo
->chainstack
[i
]);
1221 vfree(newinfo
->chainstack
);
1223 vfree(newinfo
->entries
);
1229 return ERR_PTR(ret
);
1232 void ebt_unregister_table(struct net
*net
, struct ebt_table
*table
)
1237 BUGPRINT("Request to unregister NULL table!!!\n");
1240 mutex_lock(&ebt_mutex
);
1241 list_del(&table
->list
);
1242 mutex_unlock(&ebt_mutex
);
1243 EBT_ENTRY_ITERATE(table
->private->entries
, table
->private->entries_size
,
1244 ebt_cleanup_entry
, net
, NULL
);
1245 if (table
->private->nentries
)
1246 module_put(table
->me
);
1247 vfree(table
->private->entries
);
1248 if (table
->private->chainstack
) {
1249 for_each_possible_cpu(i
)
1250 vfree(table
->private->chainstack
[i
]);
1251 vfree(table
->private->chainstack
);
1253 vfree(table
->private);
1257 /* userspace just supplied us with counters */
1258 static int do_update_counters(struct net
*net
, const char *name
,
1259 struct ebt_counter __user
*counters
,
1260 unsigned int num_counters
,
1261 const void __user
*user
, unsigned int len
)
1264 struct ebt_counter
*tmp
;
1265 struct ebt_table
*t
;
1267 if (num_counters
== 0)
1270 tmp
= vmalloc(num_counters
* sizeof(*tmp
));
1274 t
= find_table_lock(net
, name
, &ret
, &ebt_mutex
);
1278 if (num_counters
!= t
->private->nentries
) {
1279 BUGPRINT("Wrong nr of counters\n");
1284 if (copy_from_user(tmp
, counters
, num_counters
* sizeof(*counters
))) {
1289 /* we want an atomic add of the counters */
1290 write_lock_bh(&t
->lock
);
1292 /* we add to the counters of the first cpu */
1293 for (i
= 0; i
< num_counters
; i
++) {
1294 t
->private->counters
[i
].pcnt
+= tmp
[i
].pcnt
;
1295 t
->private->counters
[i
].bcnt
+= tmp
[i
].bcnt
;
1298 write_unlock_bh(&t
->lock
);
1301 mutex_unlock(&ebt_mutex
);
1307 static int update_counters(struct net
*net
, const void __user
*user
,
1310 struct ebt_replace hlp
;
1312 if (copy_from_user(&hlp
, user
, sizeof(hlp
)))
1315 if (len
!= sizeof(hlp
) + hlp
.num_counters
* sizeof(struct ebt_counter
))
1318 return do_update_counters(net
, hlp
.name
, hlp
.counters
,
1319 hlp
.num_counters
, user
, len
);
1322 static inline int ebt_make_matchname(const struct ebt_entry_match
*m
,
1323 const char *base
, char __user
*ubase
)
1325 char __user
*hlp
= ubase
+ ((char *)m
- base
);
1326 if (copy_to_user(hlp
, m
->u
.match
->name
, EBT_FUNCTION_MAXNAMELEN
))
1331 static inline int ebt_make_watchername(const struct ebt_entry_watcher
*w
,
1332 const char *base
, char __user
*ubase
)
1334 char __user
*hlp
= ubase
+ ((char *)w
- base
);
1335 if (copy_to_user(hlp
, w
->u
.watcher
->name
, EBT_FUNCTION_MAXNAMELEN
))
1341 ebt_make_names(struct ebt_entry
*e
, const char *base
, char __user
*ubase
)
1345 const struct ebt_entry_target
*t
;
1347 if (e
->bitmask
== 0)
1350 hlp
= ubase
+ (((char *)e
+ e
->target_offset
) - base
);
1351 t
= (struct ebt_entry_target
*)(((char *)e
) + e
->target_offset
);
1353 ret
= EBT_MATCH_ITERATE(e
, ebt_make_matchname
, base
, ubase
);
1356 ret
= EBT_WATCHER_ITERATE(e
, ebt_make_watchername
, base
, ubase
);
1359 if (copy_to_user(hlp
, t
->u
.target
->name
, EBT_FUNCTION_MAXNAMELEN
))
1364 static int copy_counters_to_user(struct ebt_table
*t
,
1365 const struct ebt_counter
*oldcounters
,
1366 void __user
*user
, unsigned int num_counters
,
1367 unsigned int nentries
)
1369 struct ebt_counter
*counterstmp
;
1372 /* userspace might not need the counters */
1373 if (num_counters
== 0)
1376 if (num_counters
!= nentries
) {
1377 BUGPRINT("Num_counters wrong\n");
1381 counterstmp
= vmalloc(nentries
* sizeof(*counterstmp
));
1385 write_lock_bh(&t
->lock
);
1386 get_counters(oldcounters
, counterstmp
, nentries
);
1387 write_unlock_bh(&t
->lock
);
1389 if (copy_to_user(user
, counterstmp
,
1390 nentries
* sizeof(struct ebt_counter
)))
1396 /* called with ebt_mutex locked */
1397 static int copy_everything_to_user(struct ebt_table
*t
, void __user
*user
,
1398 const int *len
, int cmd
)
1400 struct ebt_replace tmp
;
1401 const struct ebt_counter
*oldcounters
;
1402 unsigned int entries_size
, nentries
;
1406 if (cmd
== EBT_SO_GET_ENTRIES
) {
1407 entries_size
= t
->private->entries_size
;
1408 nentries
= t
->private->nentries
;
1409 entries
= t
->private->entries
;
1410 oldcounters
= t
->private->counters
;
1412 entries_size
= t
->table
->entries_size
;
1413 nentries
= t
->table
->nentries
;
1414 entries
= t
->table
->entries
;
1415 oldcounters
= t
->table
->counters
;
1418 if (copy_from_user(&tmp
, user
, sizeof(tmp
)))
1421 if (*len
!= sizeof(struct ebt_replace
) + entries_size
+
1422 (tmp
.num_counters
? nentries
* sizeof(struct ebt_counter
): 0))
1425 if (tmp
.nentries
!= nentries
) {
1426 BUGPRINT("Nentries wrong\n");
1430 if (tmp
.entries_size
!= entries_size
) {
1431 BUGPRINT("Wrong size\n");
1435 ret
= copy_counters_to_user(t
, oldcounters
, tmp
.counters
,
1436 tmp
.num_counters
, nentries
);
1440 if (copy_to_user(tmp
.entries
, entries
, entries_size
)) {
1441 BUGPRINT("Couldn't copy entries to userspace\n");
1444 /* set the match/watcher/target names right */
1445 return EBT_ENTRY_ITERATE(entries
, entries_size
,
1446 ebt_make_names
, entries
, tmp
.entries
);
1449 static int do_ebt_set_ctl(struct sock
*sk
,
1450 int cmd
, void __user
*user
, unsigned int len
)
1454 if (!capable(CAP_NET_ADMIN
))
1458 case EBT_SO_SET_ENTRIES
:
1459 ret
= do_replace(sock_net(sk
), user
, len
);
1461 case EBT_SO_SET_COUNTERS
:
1462 ret
= update_counters(sock_net(sk
), user
, len
);
1470 static int do_ebt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1473 struct ebt_replace tmp
;
1474 struct ebt_table
*t
;
1476 if (!capable(CAP_NET_ADMIN
))
1479 if (copy_from_user(&tmp
, user
, sizeof(tmp
)))
1482 t
= find_table_lock(sock_net(sk
), tmp
.name
, &ret
, &ebt_mutex
);
1487 case EBT_SO_GET_INFO
:
1488 case EBT_SO_GET_INIT_INFO
:
1489 if (*len
!= sizeof(struct ebt_replace
)){
1491 mutex_unlock(&ebt_mutex
);
1494 if (cmd
== EBT_SO_GET_INFO
) {
1495 tmp
.nentries
= t
->private->nentries
;
1496 tmp
.entries_size
= t
->private->entries_size
;
1497 tmp
.valid_hooks
= t
->valid_hooks
;
1499 tmp
.nentries
= t
->table
->nentries
;
1500 tmp
.entries_size
= t
->table
->entries_size
;
1501 tmp
.valid_hooks
= t
->table
->valid_hooks
;
1503 mutex_unlock(&ebt_mutex
);
1504 if (copy_to_user(user
, &tmp
, *len
) != 0){
1505 BUGPRINT("c2u Didn't work\n");
1512 case EBT_SO_GET_ENTRIES
:
1513 case EBT_SO_GET_INIT_ENTRIES
:
1514 ret
= copy_everything_to_user(t
, user
, len
, cmd
);
1515 mutex_unlock(&ebt_mutex
);
1519 mutex_unlock(&ebt_mutex
);
1526 #ifdef CONFIG_COMPAT
1527 /* 32 bit-userspace compatibility definitions. */
1528 struct compat_ebt_replace
{
1529 char name
[EBT_TABLE_MAXNAMELEN
];
1530 compat_uint_t valid_hooks
;
1531 compat_uint_t nentries
;
1532 compat_uint_t entries_size
;
1533 /* start of the chains */
1534 compat_uptr_t hook_entry
[NF_BR_NUMHOOKS
];
1535 /* nr of counters userspace expects back */
1536 compat_uint_t num_counters
;
1537 /* where the kernel will put the old counters. */
1538 compat_uptr_t counters
;
1539 compat_uptr_t entries
;
1542 /* struct ebt_entry_match, _target and _watcher have same layout */
1543 struct compat_ebt_entry_mwt
{
1545 char name
[EBT_FUNCTION_MAXNAMELEN
];
1548 compat_uint_t match_size
;
1549 compat_uint_t data
[0];
1552 /* account for possible padding between match_size and ->data */
1553 static int ebt_compat_entry_padsize(void)
1555 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match
)) <
1556 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt
)));
1557 return (int) XT_ALIGN(sizeof(struct ebt_entry_match
)) -
1558 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt
));
1561 static int ebt_compat_match_offset(const struct xt_match
*match
,
1562 unsigned int userlen
)
1565 * ebt_among needs special handling. The kernel .matchsize is
1566 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1567 * value is expected.
1568 * Example: userspace sends 4500, ebt_among.c wants 4504.
1570 if (unlikely(match
->matchsize
== -1))
1571 return XT_ALIGN(userlen
) - COMPAT_XT_ALIGN(userlen
);
1572 return xt_compat_match_offset(match
);
1575 static int compat_match_to_user(struct ebt_entry_match
*m
, void __user
**dstptr
,
1578 const struct xt_match
*match
= m
->u
.match
;
1579 struct compat_ebt_entry_mwt __user
*cm
= *dstptr
;
1580 int off
= ebt_compat_match_offset(match
, m
->match_size
);
1581 compat_uint_t msize
= m
->match_size
- off
;
1583 BUG_ON(off
>= m
->match_size
);
1585 if (copy_to_user(cm
->u
.name
, match
->name
,
1586 strlen(match
->name
) + 1) || put_user(msize
, &cm
->match_size
))
1589 if (match
->compat_to_user
) {
1590 if (match
->compat_to_user(cm
->data
, m
->data
))
1592 } else if (copy_to_user(cm
->data
, m
->data
, msize
))
1595 *size
-= ebt_compat_entry_padsize() + off
;
1601 static int compat_target_to_user(struct ebt_entry_target
*t
,
1602 void __user
**dstptr
,
1605 const struct xt_target
*target
= t
->u
.target
;
1606 struct compat_ebt_entry_mwt __user
*cm
= *dstptr
;
1607 int off
= xt_compat_target_offset(target
);
1608 compat_uint_t tsize
= t
->target_size
- off
;
1610 BUG_ON(off
>= t
->target_size
);
1612 if (copy_to_user(cm
->u
.name
, target
->name
,
1613 strlen(target
->name
) + 1) || put_user(tsize
, &cm
->match_size
))
1616 if (target
->compat_to_user
) {
1617 if (target
->compat_to_user(cm
->data
, t
->data
))
1619 } else if (copy_to_user(cm
->data
, t
->data
, tsize
))
1622 *size
-= ebt_compat_entry_padsize() + off
;
1628 static int compat_watcher_to_user(struct ebt_entry_watcher
*w
,
1629 void __user
**dstptr
,
1632 return compat_target_to_user((struct ebt_entry_target
*)w
,
1636 static int compat_copy_entry_to_user(struct ebt_entry
*e
, void __user
**dstptr
,
1639 struct ebt_entry_target
*t
;
1640 struct ebt_entry __user
*ce
;
1641 u32 watchers_offset
, target_offset
, next_offset
;
1642 compat_uint_t origsize
;
1645 if (e
->bitmask
== 0) {
1646 if (*size
< sizeof(struct ebt_entries
))
1648 if (copy_to_user(*dstptr
, e
, sizeof(struct ebt_entries
)))
1651 *dstptr
+= sizeof(struct ebt_entries
);
1652 *size
-= sizeof(struct ebt_entries
);
1656 if (*size
< sizeof(*ce
))
1659 ce
= (struct ebt_entry __user
*)*dstptr
;
1660 if (copy_to_user(ce
, e
, sizeof(*ce
)))
1664 *dstptr
+= sizeof(*ce
);
1666 ret
= EBT_MATCH_ITERATE(e
, compat_match_to_user
, dstptr
, size
);
1669 watchers_offset
= e
->watchers_offset
- (origsize
- *size
);
1671 ret
= EBT_WATCHER_ITERATE(e
, compat_watcher_to_user
, dstptr
, size
);
1674 target_offset
= e
->target_offset
- (origsize
- *size
);
1676 t
= (struct ebt_entry_target
*) ((char *) e
+ e
->target_offset
);
1678 ret
= compat_target_to_user(t
, dstptr
, size
);
1681 next_offset
= e
->next_offset
- (origsize
- *size
);
1683 if (put_user(watchers_offset
, &ce
->watchers_offset
) ||
1684 put_user(target_offset
, &ce
->target_offset
) ||
1685 put_user(next_offset
, &ce
->next_offset
))
1688 *size
-= sizeof(*ce
);
1692 static int compat_calc_match(struct ebt_entry_match
*m
, int *off
)
1694 *off
+= ebt_compat_match_offset(m
->u
.match
, m
->match_size
);
1695 *off
+= ebt_compat_entry_padsize();
1699 static int compat_calc_watcher(struct ebt_entry_watcher
*w
, int *off
)
1701 *off
+= xt_compat_target_offset(w
->u
.watcher
);
1702 *off
+= ebt_compat_entry_padsize();
1706 static int compat_calc_entry(const struct ebt_entry
*e
,
1707 const struct ebt_table_info
*info
,
1709 struct compat_ebt_replace
*newinfo
)
1711 const struct ebt_entry_target
*t
;
1712 unsigned int entry_offset
;
1715 if (e
->bitmask
== 0)
1719 entry_offset
= (void *)e
- base
;
1721 EBT_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1722 EBT_WATCHER_ITERATE(e
, compat_calc_watcher
, &off
);
1724 t
= (const struct ebt_entry_target
*) ((char *) e
+ e
->target_offset
);
1726 off
+= xt_compat_target_offset(t
->u
.target
);
1727 off
+= ebt_compat_entry_padsize();
1729 newinfo
->entries_size
-= off
;
1731 ret
= xt_compat_add_offset(NFPROTO_BRIDGE
, entry_offset
, off
);
1735 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
1736 const void *hookptr
= info
->hook_entry
[i
];
1737 if (info
->hook_entry
[i
] &&
1738 (e
< (struct ebt_entry
*)(base
- hookptr
))) {
1739 newinfo
->hook_entry
[i
] -= off
;
1740 pr_debug("0x%08X -> 0x%08X\n",
1741 newinfo
->hook_entry
[i
] + off
,
1742 newinfo
->hook_entry
[i
]);
1750 static int compat_table_info(const struct ebt_table_info
*info
,
1751 struct compat_ebt_replace
*newinfo
)
1753 unsigned int size
= info
->entries_size
;
1754 const void *entries
= info
->entries
;
1756 newinfo
->entries_size
= size
;
1758 return EBT_ENTRY_ITERATE(entries
, size
, compat_calc_entry
, info
,
1762 static int compat_copy_everything_to_user(struct ebt_table
*t
,
1763 void __user
*user
, int *len
, int cmd
)
1765 struct compat_ebt_replace repl
, tmp
;
1766 struct ebt_counter
*oldcounters
;
1767 struct ebt_table_info tinfo
;
1771 memset(&tinfo
, 0, sizeof(tinfo
));
1773 if (cmd
== EBT_SO_GET_ENTRIES
) {
1774 tinfo
.entries_size
= t
->private->entries_size
;
1775 tinfo
.nentries
= t
->private->nentries
;
1776 tinfo
.entries
= t
->private->entries
;
1777 oldcounters
= t
->private->counters
;
1779 tinfo
.entries_size
= t
->table
->entries_size
;
1780 tinfo
.nentries
= t
->table
->nentries
;
1781 tinfo
.entries
= t
->table
->entries
;
1782 oldcounters
= t
->table
->counters
;
1785 if (copy_from_user(&tmp
, user
, sizeof(tmp
)))
1788 if (tmp
.nentries
!= tinfo
.nentries
||
1789 (tmp
.num_counters
&& tmp
.num_counters
!= tinfo
.nentries
))
1792 memcpy(&repl
, &tmp
, sizeof(repl
));
1793 if (cmd
== EBT_SO_GET_ENTRIES
)
1794 ret
= compat_table_info(t
->private, &repl
);
1796 ret
= compat_table_info(&tinfo
, &repl
);
1800 if (*len
!= sizeof(tmp
) + repl
.entries_size
+
1801 (tmp
.num_counters
? tinfo
.nentries
* sizeof(struct ebt_counter
): 0)) {
1802 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1803 *len
, tinfo
.entries_size
, repl
.entries_size
);
1807 /* userspace might not need the counters */
1808 ret
= copy_counters_to_user(t
, oldcounters
, compat_ptr(tmp
.counters
),
1809 tmp
.num_counters
, tinfo
.nentries
);
1813 pos
= compat_ptr(tmp
.entries
);
1814 return EBT_ENTRY_ITERATE(tinfo
.entries
, tinfo
.entries_size
,
1815 compat_copy_entry_to_user
, &pos
, &tmp
.entries_size
);
1818 struct ebt_entries_buf_state
{
1819 char *buf_kern_start
; /* kernel buffer to copy (translated) data to */
1820 u32 buf_kern_len
; /* total size of kernel buffer */
1821 u32 buf_kern_offset
; /* amount of data copied so far */
1822 u32 buf_user_offset
; /* read position in userspace buffer */
1825 static int ebt_buf_count(struct ebt_entries_buf_state
*state
, unsigned int sz
)
1827 state
->buf_kern_offset
+= sz
;
1828 return state
->buf_kern_offset
>= sz
? 0 : -EINVAL
;
1831 static int ebt_buf_add(struct ebt_entries_buf_state
*state
,
1832 void *data
, unsigned int sz
)
1834 if (state
->buf_kern_start
== NULL
)
1837 BUG_ON(state
->buf_kern_offset
+ sz
> state
->buf_kern_len
);
1839 memcpy(state
->buf_kern_start
+ state
->buf_kern_offset
, data
, sz
);
1842 state
->buf_user_offset
+= sz
;
1843 return ebt_buf_count(state
, sz
);
1846 static int ebt_buf_add_pad(struct ebt_entries_buf_state
*state
, unsigned int sz
)
1848 char *b
= state
->buf_kern_start
;
1850 BUG_ON(b
&& state
->buf_kern_offset
> state
->buf_kern_len
);
1852 if (b
!= NULL
&& sz
> 0)
1853 memset(b
+ state
->buf_kern_offset
, 0, sz
);
1854 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1855 return ebt_buf_count(state
, sz
);
1864 static int compat_mtw_from_user(struct compat_ebt_entry_mwt
*mwt
,
1865 enum compat_mwt compat_mwt
,
1866 struct ebt_entries_buf_state
*state
,
1867 const unsigned char *base
)
1869 char name
[EBT_FUNCTION_MAXNAMELEN
];
1870 struct xt_match
*match
;
1871 struct xt_target
*wt
;
1873 int off
, pad
= 0, ret
= 0;
1874 unsigned int size_kern
, entry_offset
, match_size
= mwt
->match_size
;
1876 strlcpy(name
, mwt
->u
.name
, sizeof(name
));
1878 if (state
->buf_kern_start
)
1879 dst
= state
->buf_kern_start
+ state
->buf_kern_offset
;
1881 entry_offset
= (unsigned char *) mwt
- base
;
1882 switch (compat_mwt
) {
1883 case EBT_COMPAT_MATCH
:
1884 match
= try_then_request_module(xt_find_match(NFPROTO_BRIDGE
,
1885 name
, 0), "ebt_%s", name
);
1889 return PTR_ERR(match
);
1891 off
= ebt_compat_match_offset(match
, match_size
);
1893 if (match
->compat_from_user
)
1894 match
->compat_from_user(dst
, mwt
->data
);
1896 memcpy(dst
, mwt
->data
, match_size
);
1899 size_kern
= match
->matchsize
;
1900 if (unlikely(size_kern
== -1))
1901 size_kern
= match_size
;
1902 module_put(match
->me
);
1904 case EBT_COMPAT_WATCHER
: /* fallthrough */
1905 case EBT_COMPAT_TARGET
:
1906 wt
= try_then_request_module(xt_find_target(NFPROTO_BRIDGE
,
1907 name
, 0), "ebt_%s", name
);
1912 off
= xt_compat_target_offset(wt
);
1915 if (wt
->compat_from_user
)
1916 wt
->compat_from_user(dst
, mwt
->data
);
1918 memcpy(dst
, mwt
->data
, match_size
);
1921 size_kern
= wt
->targetsize
;
1927 ret
= xt_compat_add_offset(NFPROTO_BRIDGE
, entry_offset
,
1928 off
+ ebt_compat_entry_padsize());
1933 state
->buf_kern_offset
+= match_size
+ off
;
1934 state
->buf_user_offset
+= match_size
;
1935 pad
= XT_ALIGN(size_kern
) - size_kern
;
1937 if (pad
> 0 && dst
) {
1938 BUG_ON(state
->buf_kern_len
<= pad
);
1939 BUG_ON(state
->buf_kern_offset
- (match_size
+ off
) + size_kern
> state
->buf_kern_len
- pad
);
1940 memset(dst
+ size_kern
, 0, pad
);
1942 return off
+ match_size
;
1946 * return size of all matches, watchers or target, including necessary
1947 * alignment and padding.
1949 static int ebt_size_mwt(struct compat_ebt_entry_mwt
*match32
,
1950 unsigned int size_left
, enum compat_mwt type
,
1951 struct ebt_entries_buf_state
*state
, const void *base
)
1959 buf
= (char *) match32
;
1961 while (size_left
>= sizeof(*match32
)) {
1962 struct ebt_entry_match
*match_kern
;
1965 match_kern
= (struct ebt_entry_match
*) state
->buf_kern_start
;
1968 tmp
= state
->buf_kern_start
+ state
->buf_kern_offset
;
1969 match_kern
= (struct ebt_entry_match
*) tmp
;
1971 ret
= ebt_buf_add(state
, buf
, sizeof(*match32
));
1974 size_left
-= sizeof(*match32
);
1976 /* add padding before match->data (if any) */
1977 ret
= ebt_buf_add_pad(state
, ebt_compat_entry_padsize());
1981 if (match32
->match_size
> size_left
)
1984 size_left
-= match32
->match_size
;
1986 ret
= compat_mtw_from_user(match32
, type
, state
, base
);
1990 BUG_ON(ret
< match32
->match_size
);
1991 growth
+= ret
- match32
->match_size
;
1992 growth
+= ebt_compat_entry_padsize();
1994 buf
+= sizeof(*match32
);
1995 buf
+= match32
->match_size
;
1998 match_kern
->match_size
= ret
;
2000 WARN_ON(type
== EBT_COMPAT_TARGET
&& size_left
);
2001 match32
= (struct compat_ebt_entry_mwt
*) buf
;
/* iterate the compat watchers / matches of an entry.
 * NOTE(review): the extracted source references __watcher->watcher_size,
 * which struct compat_ebt_entry_mwt does not declare (it has match_size);
 * kept as extracted — verify against the original tree before building. */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)		\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct compat_ebt_entry_mwt *__watcher;			\
								\
	for (__i = e->watchers_offset;				\
	     __i < (e)->target_offset;				\
	     __i += __watcher->watcher_size +			\
	     sizeof(struct compat_ebt_entry_mwt)) {		\
		__watcher = (void *)(e) + __i;			\
		__ret = fn(__watcher , ## args);		\
		if (__ret != 0)					\
			break;					\
	}							\
	if (__ret == 0) {					\
		if (__i != (e)->target_offset)			\
			__ret = -EINVAL;			\
	}							\
	__ret;							\
})

#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)		\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct compat_ebt_entry_mwt *__match;			\
								\
	for (__i = sizeof(struct ebt_entry);			\
	     __i < (e)->watchers_offset;			\
	     __i += __match->match_size +			\
	     sizeof(struct compat_ebt_entry_mwt)) {		\
		__match = (void *)(e) + __i;			\
		__ret = fn(__match , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	if (__ret == 0) {					\
		if (__i != (e)->watchers_offset)		\
			__ret = -EINVAL;			\
	}							\
	__ret;							\
})
2051 /* called for all ebt_entry structures. */
2052 static int size_entry_mwt(struct ebt_entry
*entry
, const unsigned char *base
,
2053 unsigned int *total
,
2054 struct ebt_entries_buf_state
*state
)
2056 unsigned int i
, j
, startoff
, new_offset
= 0;
2057 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2058 unsigned int offsets
[4];
2059 unsigned int *offsets_update
= NULL
;
2063 if (*total
< sizeof(struct ebt_entries
))
2066 if (!entry
->bitmask
) {
2067 *total
-= sizeof(struct ebt_entries
);
2068 return ebt_buf_add(state
, entry
, sizeof(struct ebt_entries
));
2070 if (*total
< sizeof(*entry
) || entry
->next_offset
< sizeof(*entry
))
2073 startoff
= state
->buf_user_offset
;
2074 /* pull in most part of ebt_entry, it does not need to be changed. */
2075 ret
= ebt_buf_add(state
, entry
,
2076 offsetof(struct ebt_entry
, watchers_offset
));
2080 offsets
[0] = sizeof(struct ebt_entry
); /* matches come first */
2081 memcpy(&offsets
[1], &entry
->watchers_offset
,
2082 sizeof(offsets
) - sizeof(offsets
[0]));
2084 if (state
->buf_kern_start
) {
2085 buf_start
= state
->buf_kern_start
+ state
->buf_kern_offset
;
2086 offsets_update
= (unsigned int *) buf_start
;
2088 ret
= ebt_buf_add(state
, &offsets
[1],
2089 sizeof(offsets
) - sizeof(offsets
[0]));
2092 buf_start
= (char *) entry
;
2094 * 0: matches offset, always follows ebt_entry.
2095 * 1: watchers offset, from ebt_entry structure
2096 * 2: target offset, from ebt_entry structure
2097 * 3: next ebt_entry offset, from ebt_entry structure
2099 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2101 for (i
= 0, j
= 1 ; j
< 4 ; j
++, i
++) {
2102 struct compat_ebt_entry_mwt
*match32
;
2104 char *buf
= buf_start
;
2106 buf
= buf_start
+ offsets
[i
];
2107 if (offsets
[i
] > offsets
[j
])
2110 match32
= (struct compat_ebt_entry_mwt
*) buf
;
2111 size
= offsets
[j
] - offsets
[i
];
2112 ret
= ebt_size_mwt(match32
, size
, i
, state
, base
);
2116 if (offsets_update
&& new_offset
) {
2117 pr_debug("change offset %d to %d\n",
2118 offsets_update
[i
], offsets
[j
] + new_offset
);
2119 offsets_update
[i
] = offsets
[j
] + new_offset
;
2123 startoff
= state
->buf_user_offset
- startoff
;
2125 BUG_ON(*total
< startoff
);
2131 * repl->entries_size is the size of the ebt_entry blob in userspace.
2132 * It might need more memory when copied to a 64 bit kernel in case
2133 * userspace is 32-bit. So, first task: find out how much memory is needed.
2135 * Called before validation is performed.
2137 static int compat_copy_entries(unsigned char *data
, unsigned int size_user
,
2138 struct ebt_entries_buf_state
*state
)
2140 unsigned int size_remaining
= size_user
;
2143 ret
= EBT_ENTRY_ITERATE(data
, size_user
, size_entry_mwt
, data
,
2144 &size_remaining
, state
);
2148 WARN_ON(size_remaining
);
2149 return state
->buf_kern_offset
;
2153 static int compat_copy_ebt_replace_from_user(struct ebt_replace
*repl
,
2154 void __user
*user
, unsigned int len
)
2156 struct compat_ebt_replace tmp
;
2159 if (len
< sizeof(tmp
))
2162 if (copy_from_user(&tmp
, user
, sizeof(tmp
)))
2165 if (len
!= sizeof(tmp
) + tmp
.entries_size
)
2168 if (tmp
.entries_size
== 0)
2171 if (tmp
.nentries
>= ((INT_MAX
- sizeof(struct ebt_table_info
)) /
2172 NR_CPUS
- SMP_CACHE_BYTES
) / sizeof(struct ebt_counter
))
2174 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct ebt_counter
))
2177 memcpy(repl
, &tmp
, offsetof(struct ebt_replace
, hook_entry
));
2179 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2180 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++)
2181 repl
->hook_entry
[i
] = compat_ptr(tmp
.hook_entry
[i
]);
2183 repl
->num_counters
= tmp
.num_counters
;
2184 repl
->counters
= compat_ptr(tmp
.counters
);
2185 repl
->entries
= compat_ptr(tmp
.entries
);
2189 static int compat_do_replace(struct net
*net
, void __user
*user
,
2192 int ret
, i
, countersize
, size64
;
2193 struct ebt_table_info
*newinfo
;
2194 struct ebt_replace tmp
;
2195 struct ebt_entries_buf_state state
;
2198 ret
= compat_copy_ebt_replace_from_user(&tmp
, user
, len
);
2200 /* try real handler in case userland supplied needed padding */
2201 if (ret
== -EINVAL
&& do_replace(net
, user
, len
) == 0)
2206 countersize
= COUNTER_OFFSET(tmp
.nentries
) * nr_cpu_ids
;
2207 newinfo
= vmalloc(sizeof(*newinfo
) + countersize
);
2212 memset(newinfo
->counters
, 0, countersize
);
2214 memset(&state
, 0, sizeof(state
));
2216 newinfo
->entries
= vmalloc(tmp
.entries_size
);
2217 if (!newinfo
->entries
) {
2222 newinfo
->entries
, tmp
.entries
, tmp
.entries_size
) != 0) {
2227 entries_tmp
= newinfo
->entries
;
2229 xt_compat_lock(NFPROTO_BRIDGE
);
2231 ret
= compat_copy_entries(entries_tmp
, tmp
.entries_size
, &state
);
2235 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2236 tmp
.entries_size
, state
.buf_kern_offset
, state
.buf_user_offset
,
2237 xt_compat_calc_jump(NFPROTO_BRIDGE
, tmp
.entries_size
));
2240 newinfo
->entries
= vmalloc(size64
);
2241 if (!newinfo
->entries
) {
2247 memset(&state
, 0, sizeof(state
));
2248 state
.buf_kern_start
= newinfo
->entries
;
2249 state
.buf_kern_len
= size64
;
2251 ret
= compat_copy_entries(entries_tmp
, tmp
.entries_size
, &state
);
2252 BUG_ON(ret
< 0); /* parses same data again */
2255 tmp
.entries_size
= size64
;
2257 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
2258 char __user
*usrptr
;
2259 if (tmp
.hook_entry
[i
]) {
2261 usrptr
= (char __user
*) tmp
.hook_entry
[i
];
2262 delta
= usrptr
- tmp
.entries
;
2263 usrptr
+= xt_compat_calc_jump(NFPROTO_BRIDGE
, delta
);
2264 tmp
.hook_entry
[i
] = (struct ebt_entries __user
*)usrptr
;
2268 xt_compat_flush_offsets(NFPROTO_BRIDGE
);
2269 xt_compat_unlock(NFPROTO_BRIDGE
);
2271 ret
= do_replace_finish(net
, &tmp
, newinfo
);
2275 vfree(newinfo
->entries
);
2280 xt_compat_flush_offsets(NFPROTO_BRIDGE
);
2281 xt_compat_unlock(NFPROTO_BRIDGE
);
2285 static int compat_update_counters(struct net
*net
, void __user
*user
,
2288 struct compat_ebt_replace hlp
;
2290 if (copy_from_user(&hlp
, user
, sizeof(hlp
)))
2293 /* try real handler in case userland supplied needed padding */
2294 if (len
!= sizeof(hlp
) + hlp
.num_counters
* sizeof(struct ebt_counter
))
2295 return update_counters(net
, user
, len
);
2297 return do_update_counters(net
, hlp
.name
, compat_ptr(hlp
.counters
),
2298 hlp
.num_counters
, user
, len
);
2301 static int compat_do_ebt_set_ctl(struct sock
*sk
,
2302 int cmd
, void __user
*user
, unsigned int len
)
2306 if (!capable(CAP_NET_ADMIN
))
2310 case EBT_SO_SET_ENTRIES
:
2311 ret
= compat_do_replace(sock_net(sk
), user
, len
);
2313 case EBT_SO_SET_COUNTERS
:
2314 ret
= compat_update_counters(sock_net(sk
), user
, len
);
2322 static int compat_do_ebt_get_ctl(struct sock
*sk
, int cmd
,
2323 void __user
*user
, int *len
)
2326 struct compat_ebt_replace tmp
;
2327 struct ebt_table
*t
;
2329 if (!capable(CAP_NET_ADMIN
))
2332 /* try real handler in case userland supplied needed padding */
2333 if ((cmd
== EBT_SO_GET_INFO
||
2334 cmd
== EBT_SO_GET_INIT_INFO
) && *len
!= sizeof(tmp
))
2335 return do_ebt_get_ctl(sk
, cmd
, user
, len
);
2337 if (copy_from_user(&tmp
, user
, sizeof(tmp
)))
2340 t
= find_table_lock(sock_net(sk
), tmp
.name
, &ret
, &ebt_mutex
);
2344 xt_compat_lock(NFPROTO_BRIDGE
);
2346 case EBT_SO_GET_INFO
:
2347 tmp
.nentries
= t
->private->nentries
;
2348 ret
= compat_table_info(t
->private, &tmp
);
2351 tmp
.valid_hooks
= t
->valid_hooks
;
2353 if (copy_to_user(user
, &tmp
, *len
) != 0) {
2359 case EBT_SO_GET_INIT_INFO
:
2360 tmp
.nentries
= t
->table
->nentries
;
2361 tmp
.entries_size
= t
->table
->entries_size
;
2362 tmp
.valid_hooks
= t
->table
->valid_hooks
;
2364 if (copy_to_user(user
, &tmp
, *len
) != 0) {
2370 case EBT_SO_GET_ENTRIES
:
2371 case EBT_SO_GET_INIT_ENTRIES
:
2373 * try real handler first in case of userland-side padding.
2374 * in case we are dealing with an 'ordinary' 32 bit binary
2375 * without 64bit compatibility padding, this will fail right
2376 * after copy_from_user when the *len argument is validated.
2378 * the compat_ variant needs to do one pass over the kernel
2379 * data set to adjust for size differences before it the check.
2381 if (copy_everything_to_user(t
, user
, len
, cmd
) == 0)
2384 ret
= compat_copy_everything_to_user(t
, user
, len
, cmd
);
2390 xt_compat_flush_offsets(NFPROTO_BRIDGE
);
2391 xt_compat_unlock(NFPROTO_BRIDGE
);
2392 mutex_unlock(&ebt_mutex
);
2397 static struct nf_sockopt_ops ebt_sockopts
=
2400 .set_optmin
= EBT_BASE_CTL
,
2401 .set_optmax
= EBT_SO_SET_MAX
+ 1,
2402 .set
= do_ebt_set_ctl
,
2403 #ifdef CONFIG_COMPAT
2404 .compat_set
= compat_do_ebt_set_ctl
,
2406 .get_optmin
= EBT_BASE_CTL
,
2407 .get_optmax
= EBT_SO_GET_MAX
+ 1,
2408 .get
= do_ebt_get_ctl
,
2409 #ifdef CONFIG_COMPAT
2410 .compat_get
= compat_do_ebt_get_ctl
,
2412 .owner
= THIS_MODULE
,
2415 static int __init
ebtables_init(void)
2419 ret
= xt_register_target(&ebt_standard_target
);
2422 ret
= nf_register_sockopt(&ebt_sockopts
);
2424 xt_unregister_target(&ebt_standard_target
);
2428 printk(KERN_INFO
"Ebtables v2.0 registered\n");
2432 static void __exit
ebtables_fini(void)
2434 nf_unregister_sockopt(&ebt_sockopts
);
2435 xt_unregister_target(&ebt_standard_target
);
2436 printk(KERN_INFO
"Ebtables v2.0 unregistered\n");
2439 EXPORT_SYMBOL(ebt_register_table
);
2440 EXPORT_SYMBOL(ebt_unregister_table
);
2441 EXPORT_SYMBOL(ebt_do_table
);
2442 module_init(ebtables_init
);
2443 module_exit(ebtables_fini
);
2444 MODULE_LICENSE("GPL");