/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

/* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
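#if 0
/*
 * Illustrative sketch only (not part of the original source, and never
 * compiled): the locking pattern the comment above describes, using
 * hypothetical helper names.  The packet path takes only the read lock
 * around its per-CPU copy of the rules; user context takes the write lock,
 * which keeps packets out while counters are read or a new rule set is
 * swapped in.
 */
static unsigned int example_packet_path(struct xt_table *table)
{
	unsigned int verdict;

	read_lock_bh(&table->lock);	/* softirq: cheap, fully concurrent */
	/* ... walk this CPU's copy of the entries ... */
	verdict = NF_ACCEPT;
	read_unlock_bh(&table->lock);
	return verdict;
}

static void example_user_context(struct xt_table *table)
{
	write_lock_bh(&table->lock);	/* excludes every packet walker */
	/* ... snapshot the counters or replace the table ... */
	write_unlock_bh(&table->lock);
}
#endif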
/* Check for an extension */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS)   ||
		 (nexthdr == IPPROTO_ROUTING)   ||
		 (nexthdr == IPPROTO_FRAGMENT)  ||
		 (nexthdr == IPPROTO_ESP)       ||
		 (nexthdr == IPPROTO_AH)        ||
		 (nexthdr == IPPROTO_NONE)      ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	size_t i;
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
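	/*
	 * Explanatory note added here (not in the original source): FWINV()
	 * XORs a boolean test with the rule's corresponding invert flag, so
	 * e.g. FWINV(addr_mismatch, IP6T_INV_SRCIP) reads as "the source
	 * check rules this packet out" -- true on a mismatch for a plain
	 * '-s addr' rule, and true on a *match* when '! -s addr' was used.
	 */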
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ip6info->iniface)[i])
			& ((const unsigned long *)ip6info->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ip6info->outiface)[i])
			& ((const unsigned long *)ip6info->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}

/* should be ip6 safe */
static inline bool
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
		return false;
	}
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ip6t_error(struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   unsigned int hooknum,
	   const struct xt_target *target,
	   const void *targinfo)
{
	if (net_ratelimit())
		printk("ip6_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}

/* Performance critical - called for every packet */
static inline bool
do_match(struct ip6t_entry_match *m,
	 const struct sk_buff *skb,
	 const struct net_device *in,
	 const struct net_device *out,
	 int offset,
	 unsigned int protoff,
	 bool *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, protoff, hotdrop))
		return true;
	else
		return false;
}

static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline int
unconditional(const struct ip6t_ip6 *ipv6)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ipv6); i++)
		if (((char *)ipv6)[i])
			break;

	return (i == sizeof(*ipv6));
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	struct ip6t_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	int offset = 0;
	unsigned int protoff = 0;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
			&protoff, &offset, &hotdrop)) {
			struct ip6t_entry_target *t;

			if (IP6T_MATCH_ITERATE(e, do_match,
					       skb, in, out,
					       offset, protoff, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters,
				    ntohs(ipv6_hdr(skb)->payload_len) +
				    sizeof(struct ipv6hdr), 1);

			t = ip6t_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ip6t_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IP6T_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ipv6.flags & IP6T_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ip6t_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ip6t_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ip6t_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IP6T_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ip6t_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				if (verdict == IP6T_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			    && (strcmp(t->target.u.user.name,
				       IP6T_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static int
cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}

static int
check_entry(struct ip6t_entry *e, const char *name)
{
	struct ip6t_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int check_match(struct ip6t_entry_match *m, const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask, unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
			     name, hookmask, ipv6->proto,
			     ipv6->invflags & IP6T_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}

static int
find_check_match(struct ip6t_entry_match *m,
		 const char *name,
		 const struct ip6t_ip6 *ipv6,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ipv6, hookmask, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ip6t_entry *e, const char *name)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ip6t_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ipv6.proto,
			      e->ipv6.invflags & IP6T_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target, t->data,
					       e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}

static int
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
				 e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static int
cleanup_entry(struct ip6t_entry *e, unsigned int *i)
{
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ip6t_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

static inline int
add_entry_to_counter(const struct ip6t_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ip6t_entry *e,
		     struct ip6t_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IP6T_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IP6T_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
	}
}

static struct xt_counters *alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ip6t_entry_match *m;
		struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ip6t_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ip6t_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
				  compat_calc_entry, info, loc_cpu_entry,
				  newinfo);
}
#endif

static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IP6T_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ip6t_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(struct net *net, void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;
	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET6, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ip6t_replace {
	char			name[IP6T_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];
};

static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}

static int
compat_find_calc_match(struct ip6t_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}

static int
compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
{
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}

static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;
	unsigned int j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
					&e->ipv6, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IP6T_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
					dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int compat_check_entry(struct ip6t_entry *e, const char *name,
			      unsigned int *i)
{
	unsigned int j;
	int ret;

	j = 0;
	ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
				 e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		j -= i;
		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
		       unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = compat_do_replace(sk->sk_net, user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sk->sk_net, user, len, 1);
		break;

	default:
		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}

static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sk->sk_net, user, len, 1);
		break;
	case IP6T_SO_GET_ENTRIES:
		ret = compat_get_entries(sk->sk_net, user, len);
		break;
	default:
		ret = do_ip6t_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = do_replace(sk->sk_net, user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sk->sk_net, user, len, 0);
		break;

	default:
		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sk->sk_net, user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sk->sk_net, user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ip6t_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}

static bool
icmp6_match(const struct sk_buff *skb,
	    const struct net_device *in,
	    const struct net_device *out,
	    const struct xt_match *match,
	    const void *matchinfo,
	    int offset,
	    unsigned int protoff,
	    bool *hotdrop)
{
	struct icmp6hdr _icmph, *ic;
	const struct ip6t_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return false;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static bool
icmp6_checkentry(const char *tablename,
		 const void *entry,
		 const struct xt_match *match,
		 void *matchinfo,
		 unsigned int hook_mask)
{
	const struct ip6t_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET6,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= AF_INET6,
};

static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= AF_INET6,
};

static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET6);
}

static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET6);
}

static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};

static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}

static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}

/*
 * find the offset to the specified header or the protocol number of the last
 * header if target < 0. "last header" is transport protocol header, ESP, or
 * "No next header".
 *
 * If the target header is found, its offset is set in *offset and the
 * protocol number is returned. Otherwise, return -1.
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that a non-1st fragment is a special case in which "the protocol
 * number of the last header" is the "next header" field in the Fragment
 * header. In this case, *offset is meaningless and the fragment offset is
 * stored in *fragoff if fragoff is not NULL.
 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;

			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
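
#if 0
/*
 * Illustrative usage sketch only (not part of the original file, never
 * compiled): locate the TCP header of an IPv6 skb with ipv6_find_hdr().
 * The helper name and variables are hypothetical.
 */
static int example_find_tcp(const struct sk_buff *skb)
{
	unsigned int thoff;

	if (ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL) < 0)
		return -1;	/* no TCP header in the first fragment */
	/* thoff is now the offset of the TCP header within the skb */
	return thoff;
}
#endif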
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);