/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)		WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
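/*
 * A minimal sketch of how user context takes a consistent snapshot of
 * the per-CPU counters under this scheme (see get_counters() below for
 * the real implementation; the seqlock retry loop is the key idea):
 *
 *	do {
 *		start = read_seqbegin(lock);
 *		bcnt = iter->counters.bcnt;
 *		pcnt = iter->counters.pcnt;
 *	} while (read_seqretry(lock, start));
 */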
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
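	/*
	 * FWINV(cond, flag) simply XORs the condition with the rule's
	 * invert flag.  For example, with IPT_INV_SRCIP set in
	 * ipinfo->invflags, FWINV(mismatch, IPT_INV_SRCIP) is true when
	 * the addresses *do* match, implementing "-s ! addr" semantics.
	 */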
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}
	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}
	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
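/*
 * Example: the policy rule at the tail of a built-in chain carries an
 * all-zero ipt_ip (no addresses, interfaces, protocol or flags), so
 * unconditional() returns true for it; combined with a STANDARD target
 * this marks a chain policy or RETURN (see get_chainname_rulenum() and
 * check_underflow() below).
 */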
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};
static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
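/*
 * Rules are laid out back to back in one flat blob, so walking a table
 * is plain pointer arithmetic over next_offset.  A sketch of the
 * traversal that xt_entry_foreach() performs over a range of entries
 * (root and root_end here are illustrative names, not fields):
 *
 *	for (e = root; (void *)e < root_end; e = ipt_next_entry(e))
 *		...;
 */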
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);
	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));
	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);
		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_info_rdunlock_bh();
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
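/*
 * Worked example of the verdict encoding used above: standard targets
 * store verdicts as -(verdict) - 1, so an ACCEPT rule carries
 * v = -NF_ACCEPT - 1 and "verdict = (unsigned)(-v) - 1" recovers
 * NF_ACCEPT.  Non-negative v values are byte offsets (jumps) into the
 * table blob.
 */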
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
 next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
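/*
 * A note on the graph walk above: since entries are not yet live, the
 * walker borrows e->counters.pcnt as a "back pointer" to the position
 * it came from (restoring it to 0 on the way back out), and uses bit
 * NF_INET_NUMHOOKS of e->comefrom as an "on the current path" marker;
 * revisiting an entry with that bit still set means the chains loop.
 */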
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqbegin(lock);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqretry(lock, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
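/*
 * The seqlock retry loop above matters because xt_counters holds two
 * u64 values: on 32-bit SMP a reader could otherwise observe a torn
 * counter while the softirq on that CPU is mid-update.  Rereading
 * until read_seqretry() reports no concurrent writer yields a
 * consistent pair without write-locking the packet hot path.
 */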
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
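/*
 * Sketch of why the +/- xt_compat_calc_jump() adjustment is needed:
 * positive standard verdicts are byte offsets into the table, and
 * entries shrink or grow when converted between the compat (32-bit)
 * and native layouts, so a jump target must be shifted by the
 * accumulated size delta of all entries in front of it (the deltas
 * are recorded via xt_compat_add_offset() during translation).
 */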
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
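/*
 * Example: an "--icmp-type echo-request" rule stores type 8 with the
 * full code range, so icmp_type_code_match(8, 0, 0xFF, ic->type,
 * ic->code, false) is true exactly for echo requests; a test_type of
 * 0xFF acts as the "any type" wildcard.
 */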
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
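/*
 * A minimal userspace sketch of the get path these sockopts serve
 * (illustrative only; error handling omitted).  This is roughly how
 * iptables' libiptc begins a table dump:
 *
 *	struct ipt_getinfo info = { .name = "filter" };
 *	socklen_t len = sizeof(info);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *	// info.size now says how large an IPT_SO_GET_ENTRIES buffer must be
 */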
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);