/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
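/*
 * A note on the FWINV() helper used below: it XORs the result of a
 * comparison with the rule's corresponding inversion flag, so one
 * expression covers both the plain and the "!"-inverted form of each
 * iptables match option.
 */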
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
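/*
 * Rule traversal in ipt_do_table() below: a standard target encodes a
 * built-in verdict as a negative number, decoded with
 * (unsigned)(-v) - 1.  A non-negative verdict is an offset into the
 * table, i.e. a jump to a user-defined chain; the return position is
 * kept on a preallocated per-cpu jump stack rather than the kernel
 * stack.
 */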
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
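/*
 * Bookkeeping used by mark_source_chains() below: while a rule is on
 * the current walk path, bit NF_INET_NUMHOOKS of e->comefrom is set;
 * meeting that bit again means the rule is reachable from itself,
 * i.e. a loop.  e->counters.pcnt is borrowed as a back pointer during
 * the walk and reset to 0 on the way out.
 */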
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
*e
)
705 const struct xt_entry_target
*t
;
706 unsigned int verdict
;
708 if (!unconditional(&e
->ip
))
710 t
= ipt_get_target_c(e
);
711 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
713 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
714 verdict
= -verdict
- 1;
715 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
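/*
 * translate_table() below runs the whole validation pipeline on a
 * candidate ruleset: bounds/alignment checks on every entry, hook and
 * underflow assignment, loop detection via mark_source_chains(), and
 * finally the per-entry match/target checkentry hooks.  Only a
 * ruleset that passes every stage is copied to the other CPUs.
 */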
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
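/*
 * Counters are updated locklessly on the packet path, so the snapshot
 * below rereads each pair under the per-cpu xt_recseq seqcount until
 * it observes a consistent 64-bit bcnt/pcnt pair.
 */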
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
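/*
 * Everything under CONFIG_COMPAT translates between the 32-bit
 * userspace layout (struct compat_ipt_entry) and the native one; the
 * size delta of every entry is recorded with xt_compat_add_offset()
 * so jump offsets can be adjusted in both directions.
 */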
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
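/*
 * __do_replace() swaps in a fully validated table via
 * xt_replace_table(), snapshots the old counters for userspace, and
 * only then tears down the old entries, so the packet path never sees
 * a half-built ruleset.
 */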
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
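/*
 * do_add_counters() folds a userspace-supplied counter array into the
 * live table; it writes only this CPU's copy, inside an
 * xt_write_recseq section so concurrent readers retry instead of
 * seeing torn 64-bit values.
 */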
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
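/*
 * translate_compat_table() below works in two passes: first the
 * compat entries are size-checked and their layout deltas registered,
 * then everything is copied into a native-format table and
 * revalidated with the same checks used for a native replace.
 */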
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
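/*
 * The built-in "icmp" match: type 0xFF acts as a wildcard, otherwise
 * the packet's type must match exactly and its code must fall in
 * [code[0], code[1]]; IPT_ICMP_INV inverts the result.
 */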
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
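/*
 * Module init/exit: registration order is pernet subsystem, built-in
 * targets, built-in matches, then the sockopt interface; the error
 * path and ip_tables_fini() unwind in exactly the reverse order.
 */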
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);