/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
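/*
 * FWINV(cond, flag) evaluates to "cond", flipped when "flag" is set in
 * ipinfo->invflags.  E.g. for a rule written as "-s ! 10.0.0.0/8",
 * IPT_INV_SRCIP is set, so a source-address mismatch against 10.0.0.0/8
 * XORs to false and the rule matches everything *outside* that network.
 */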
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}
	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}
	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
}
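/*
 * Note: a chain policy or RETURN at the end of a chain is represented as
 * a STANDARD target on an entry whose ipt_ip is entirely zero, so
 * "matches every packet" can be tested with a single memcmp() here.
 */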
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};
static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
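/*
 * Entries are variable-length and laid out back to back: an ipt_entry
 * header, then its matches, then its target.  next_offset covers all
 * three, so "(void *)entry + entry->next_offset" is the following rule.
 */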
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (i.e. the TCP SYN flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));
	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}
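		/*
		 * Verdict encoding, for reference: userspace stores absolute
		 * verdicts as -verdict - 1, so v == -1 decodes to NF_DROP (0)
		 * and v == -2 to NF_ACCEPT (1) via (unsigned)(-v) - 1.
		 * A non-negative v is a jump offset into the table instead.
		 */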
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_info_rdunlock_bh();
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
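/*
 * The traversal above relies on the per-cpu design described near the
 * top of this file: each CPU walks its own copy of the table under
 * xt_info_rdlock_bh(), and jumps/RETURNs are tracked on a per-cpu
 * jumpstack rather than by recursion.
 */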
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;
		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}
				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
 next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
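/*
 * The walk above avoids recursion by temporarily reusing per-entry
 * scratch space: counters.pcnt holds the "back pointer" to the previous
 * position (reset to 0 on the way out), and two bits in comefrom mark an
 * entry as visited from this hook, which is how loops are detected.
 */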
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
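/*
 * Example: an underflow (chain policy) of ACCEPT is stored by userspace
 * as verdict -NF_ACCEPT - 1 == -2; decoding with -verdict - 1 yields
 * NF_ACCEPT again, which is exactly what the check above accepts.
 */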
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
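/*
 * To summarize translate_table(): offsets and hook/underflow positions
 * are validated first, mark_source_chains() then rejects loops, every
 * entry's matches and target are looked up and checked, and only then is
 * the validated blob copied out to every other CPU.
 */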
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqbegin(lock);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqretry(lock, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
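/*
 * The seqlock retry loop matters because the 64-bit bcnt/pcnt pair
 * cannot be read atomically on 32-bit machines; re-reading until the
 * sequence is stable yields a consistent snapshot without stopping the
 * packet path.
 */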
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
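/*
 * Rough idea of the two helpers above: native entries are bigger than
 * their 32-bit compat counterparts, so a standard target's jump offset
 * must be shifted by the accumulated size delta (xt_compat_calc_jump)
 * when moving between kernel and compat layouts.
 */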
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
*e
)
1462 struct xt_entry_target
*t
;
1463 struct xt_entry_match
*ematch
;
1465 /* Cleanup all matches */
1466 xt_ematch_foreach(ematch
, e
)
1467 module_put(ematch
->u
.kernel
.match
->me
);
1468 t
= compat_ipt_get_target(e
);
1469 module_put(t
->u
.kernel
.target
->me
);
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
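/*
 * Example: a test_type of 0xFF is the wildcard (roughly what userspace
 * sends for an "any" ICMP type), so the type/code range is skipped and
 * every ICMP packet matches, modulo the invert flag.
 */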
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
= {
2187 .set_optmin
= IPT_BASE_CTL
,
2188 .set_optmax
= IPT_SO_SET_MAX
+1,
2189 .set
= do_ipt_set_ctl
,
2190 #ifdef CONFIG_COMPAT
2191 .compat_set
= compat_do_ipt_set_ctl
,
2193 .get_optmin
= IPT_BASE_CTL
,
2194 .get_optmax
= IPT_SO_GET_MAX
+1,
2195 .get
= do_ipt_get_ctl
,
2196 #ifdef CONFIG_COMPAT
2197 .compat_get
= compat_do_ipt_get_ctl
,
2199 .owner
= THIS_MODULE
,
2202 static struct xt_match ipt_builtin_mt
[] __read_mostly
= {
2205 .match
= icmp_match
,
2206 .matchsize
= sizeof(struct ipt_icmp
),
2207 .checkentry
= icmp_checkentry
,
2208 .proto
= IPPROTO_ICMP
,
2209 .family
= NFPROTO_IPV4
,
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);