/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
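/* Editor's sketch of the layout this implies (illustrative only; the
 * real fields live in struct xt_table_info in x_tables.h): the table
 * keeps one rule blob per possible CPU,
 *
 *	void *entries[...];	-- indexed by smp_processor_id()
 *
 * so softirq readers touch only their own copy's counters, while a
 * full replacement goes through xt_replace_table() in user context.
 */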
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;
#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
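/* Worked example: FWINV() folds iptables' '!' negation into a test.
 * For a rule like "-s ! 10.0.0.0/8", invflags contains IPT_INV_SRCIP,
 * so FWINV(mismatch, IPT_INV_SRCIP) is mismatch ^ 1 and the check
 * below fails exactly when the plain address test succeeds:
 *
 *	mismatch  inverted  FWINV -> rule rejected?
 *	    0        0        0         no
 *	    1        0        1         yes
 *	    0        1        1         yes
 *	    1        1        0         no
 */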
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}
	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
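#if 0
/* Editor's sketch, not called anywhere: rules are variable-sized, so a
 * table blob can only be walked linearly via next_offset; the real
 * iteration in this file goes through IPT_ENTRY_ITERATE(). */
static void example_walk(void *base, unsigned int size)
{
	unsigned int off = 0;

	while (off < size) {
		struct ipt_entry *e = get_entry(base, off);
		/* inspect e->ip, e->counters, ipt_get_target(e), ... */
		off += e->next_offset;
	}
}
#endif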
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;
	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
	do {
		struct ipt_entry_target *t;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff) ||
		    IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
			e = ipt_next_entry(e);
			continue;
		}

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e)
			    && !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}
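		/* Worked example of the verdict encoding handled above:
		 * userspace stores a standard verdict as -(code + 1), so
		 * v = -1 means NF_DROP (0) and v = -2 means NF_ACCEPT (1);
		 * "(unsigned)(-v) - 1" recovers the netfilter code. Any
		 * v >= 0 is instead a byte offset into the table, i.e. a
		 * jump, taken by the get_entry(table_base, v) path. */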
		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
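#if 0
/* Editor's sketch of a typical caller (cf. iptable_filter.c; the hook
 * function and table pointer here are illustrative, not exported by
 * this file): each table registers netfilter hooks whose handler just
 * delegates to ipt_do_table(). */
static unsigned int
example_hook(unsigned int hook, struct sk_buff *skb,
	     const struct net_device *in, const struct net_device *out,
	     int (*okfn)(struct sk_buff *))
{
	return ipt_do_table(skb, hook, in, out,
			    dev_net(in ? in : out)->ipv4.iptable_filter);
}
#endif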
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;
		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}
				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
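/* Example of the bookkeeping above: comefrom accumulates one bit per
 * hook that can reach a rule, plus bit NF_INET_NUMHOOKS as a
 * "currently on the walk stack" marker. Revisiting a rule whose stack
 * bit is still set means a cycle, e.g. user chain A jumping to B while
 * B jumps back to A, and the table is rejected before any packet can
 * loop through it.
 */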
static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}
static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static bool check_underflow(struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
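/* Example: for a builtin chain policy of DROP, userspace emits a
 * standard target with verdict -(NF_DROP + 1) = -1; the decode above
 * yields -(-1) - 1 = 0 = NF_DROP, which is accepted. A RETURN
 * (believed to be encoded as -NF_REPEAT - 1 in this ABI) or a positive
 * jump offset fails the NF_DROP/NF_ACCEPT test and is rejected as an
 * underflow.
 */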
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;
	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, valid_hooks, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
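/* Example of the jump fix-up above: a 64-bit ipt_entry is larger than
 * its 32-bit compat counterpart, so every rule preceding a jump target
 * shifts that target. xt_compat_calc_jump() returns the accumulated
 * size delta for a given offset; a positive verdict (a jump) gains the
 * delta on the way into the kernel and loses it on the way out, while
 * negative verdicts (ACCEPT/DROP/RETURN) pass through untouched.
 */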
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
*net
, struct ipt_get_entries __user
*uptr
, int *len
)
1173 struct ipt_get_entries get
;
1176 if (*len
< sizeof(get
)) {
1177 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1180 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1182 if (*len
!= sizeof(struct ipt_get_entries
) + get
.size
) {
1183 duprintf("get_entries: %u != %zu\n",
1184 *len
, sizeof(get
) + get
.size
);
1188 t
= xt_find_table_lock(net
, AF_INET
, get
.name
);
1189 if (t
&& !IS_ERR(t
)) {
1190 const struct xt_table_info
*private = t
->private;
1191 duprintf("t->private->number = %u\n", private->number
);
1192 if (get
.size
== private->size
)
1193 ret
= copy_entries_to_user(private->size
,
1194 t
, uptr
->entrytable
);
1196 duprintf("get_entries: I've got %u not %u!\n",
1197 private->size
, get
.size
);
1203 ret
= t
? PTR_ERR(t
) : -ENOENT
;
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
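/* Example: ADD_COUNTER(e->counters, b, p) simply performs
 * e->counters.bcnt += b; e->counters.pcnt += p;. The caller below
 * holds xt_info_wrlock() while iterating, so byte and packet counts
 * stay consistent with each other even while ipt_do_table() runs
 * concurrently on other CPUs.
 */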
static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}
static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
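/* Worked example: "--icmp-type echo-request" stores type 8 with the
 * full code range 0-255, so a packet with type 8/code 0 evaluates to
 * (8 == 8 && 0 >= 0 && 0 <= 255) ^ 0 = true; with '!' the same packet
 * yields true ^ 1 = false. test_type 0xFF is the userspace encoding
 * for "any ICMP type".
 */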
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
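#if 0
/* Editor's sketch of the userspace side of this sockopt interface
 * (roughly how iptables reads table metadata; error handling omitted
 * and the "filter" table assumed; compiles only in userspace with the
 * usual socket and ip_tables headers): */
static int example_get_filter_info(void)
{
	struct ipt_getinfo info = { .name = "filter" };
	socklen_t len = sizeof(info);
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	/* IPT_SO_GET_INFO lands in do_ipt_get_ctl() above */
	return getsockopt(s, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
}
#endif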
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);

module_init(ip_tables_init);
module_exit(ip_tables_fini);