2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 08 Oct 2005 Harald Welte <lafore@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
47 #define dprintf(format, args...)
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
53 #define duprintf(format, args...)
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
64 #define IP_NF_ASSERT(x)
68 /* All the better to debug you with... */
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
84 ip_packet_match(const struct iphdr
*ip
,
87 const struct ipt_ip
*ipinfo
,
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
95 if (FWINV((ip
->saddr
&ipinfo
->smsk
.s_addr
) != ipinfo
->src
.s_addr
,
97 || FWINV((ip
->daddr
&ipinfo
->dmsk
.s_addr
) != ipinfo
->dst
.s_addr
,
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo
->smsk
.s_addr
),
104 NIPQUAD(ipinfo
->src
.s_addr
),
105 ipinfo
->invflags
& IPT_INV_SRCIP
? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
108 NIPQUAD(ipinfo
->dmsk
.s_addr
),
109 NIPQUAD(ipinfo
->dst
.s_addr
),
110 ipinfo
->invflags
& IPT_INV_DSTIP
? " (INV)" : "");
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
116 ret
|= (((const unsigned long *)indev
)[i
]
117 ^ ((const unsigned long *)ipinfo
->iniface
)[i
])
118 & ((const unsigned long *)ipinfo
->iniface_mask
)[i
];
121 if (FWINV(ret
!= 0, IPT_INV_VIA_IN
)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev
, ipinfo
->iniface
,
124 ipinfo
->invflags
&IPT_INV_VIA_IN
?" (INV)":"");
128 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
129 ret
|= (((const unsigned long *)outdev
)[i
]
130 ^ ((const unsigned long *)ipinfo
->outiface
)[i
])
131 & ((const unsigned long *)ipinfo
->outiface_mask
)[i
];
134 if (FWINV(ret
!= 0, IPT_INV_VIA_OUT
)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev
, ipinfo
->outiface
,
137 ipinfo
->invflags
&IPT_INV_VIA_OUT
?" (INV)":"");
141 /* Check specific protocol */
143 && FWINV(ip
->protocol
!= ipinfo
->proto
, IPT_INV_PROTO
)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip
->protocol
, ipinfo
->proto
,
146 ipinfo
->invflags
&IPT_INV_PROTO
? " (INV)":"");
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo
->flags
&IPT_F_FRAG
) && !isfrag
, IPT_INV_FRAG
)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo
->invflags
& IPT_INV_FRAG
? " (INV)" : "");
162 ip_checkentry(const struct ipt_ip
*ip
)
164 if (ip
->flags
& ~IPT_F_MASK
) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip
->flags
& ~IPT_F_MASK
);
169 if (ip
->invflags
& ~IPT_INV_MASK
) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip
->invflags
& ~IPT_INV_MASK
);
178 ipt_error(struct sk_buff
**pskb
,
179 const struct net_device
*in
,
180 const struct net_device
*out
,
181 unsigned int hooknum
,
182 const struct xt_target
*target
,
183 const void *targinfo
,
187 printk("ip_tables: error: `%s'\n", (char *)targinfo
);
193 int do_match(struct ipt_entry_match
*m
,
194 const struct sk_buff
*skb
,
195 const struct net_device
*in
,
196 const struct net_device
*out
,
200 /* Stop iteration if it doesn't match */
201 if (!m
->u
.kernel
.match
->match(skb
, in
, out
, m
->u
.kernel
.match
, m
->data
,
202 offset
, skb
->nh
.iph
->ihl
*4, hotdrop
))
208 static inline struct ipt_entry
*
209 get_entry(void *base
, unsigned int offset
)
211 return (struct ipt_entry
*)(base
+ offset
);
214 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
216 ipt_do_table(struct sk_buff
**pskb
,
218 const struct net_device
*in
,
219 const struct net_device
*out
,
220 struct ipt_table
*table
,
223 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
228 /* Initializing verdict to NF_DROP keeps gcc happy. */
229 unsigned int verdict
= NF_DROP
;
230 const char *indev
, *outdev
;
232 struct ipt_entry
*e
, *back
;
233 struct xt_table_info
*private;
236 ip
= (*pskb
)->nh
.iph
;
237 datalen
= (*pskb
)->len
- ip
->ihl
* 4;
238 indev
= in
? in
->name
: nulldevname
;
239 outdev
= out
? out
->name
: nulldevname
;
240 /* We handle fragments by dealing with the first fragment as
241 * if it was a normal packet. All other fragments are treated
242 * normally, except that they will NEVER match rules that ask
243 * things we don't know, ie. tcp syn flag or ports). If the
244 * rule is also a fragment-specific rule, non-fragments won't
246 offset
= ntohs(ip
->frag_off
) & IP_OFFSET
;
248 read_lock_bh(&table
->lock
);
249 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
250 private = table
->private;
251 table_base
= (void *)private->entries
[smp_processor_id()];
252 e
= get_entry(table_base
, private->hook_entry
[hook
]);
254 /* For return from builtin chain */
255 back
= get_entry(table_base
, private->underflow
[hook
]);
260 if (ip_packet_match(ip
, indev
, outdev
, &e
->ip
, offset
)) {
261 struct ipt_entry_target
*t
;
263 if (IPT_MATCH_ITERATE(e
, do_match
,
265 offset
, &hotdrop
) != 0)
268 ADD_COUNTER(e
->counters
, ntohs(ip
->tot_len
), 1);
270 t
= ipt_get_target(e
);
271 IP_NF_ASSERT(t
->u
.kernel
.target
);
272 /* Standard target? */
273 if (!t
->u
.kernel
.target
->target
) {
276 v
= ((struct ipt_standard_target
*)t
)->verdict
;
278 /* Pop from stack? */
279 if (v
!= IPT_RETURN
) {
280 verdict
= (unsigned)(-v
) - 1;
284 back
= get_entry(table_base
,
288 if (table_base
+ v
!= (void *)e
+ e
->next_offset
289 && !(e
->ip
.flags
& IPT_F_GOTO
)) {
290 /* Save old back ptr in next entry */
291 struct ipt_entry
*next
292 = (void *)e
+ e
->next_offset
;
294 = (void *)back
- table_base
;
295 /* set back pointer to next entry */
299 e
= get_entry(table_base
, v
);
301 /* Targets which reenter must return
303 #ifdef CONFIG_NETFILTER_DEBUG
304 ((struct ipt_entry
*)table_base
)->comefrom
307 verdict
= t
->u
.kernel
.target
->target(pskb
,
314 #ifdef CONFIG_NETFILTER_DEBUG
315 if (((struct ipt_entry
*)table_base
)->comefrom
317 && verdict
== IPT_CONTINUE
) {
318 printk("Target %s reentered!\n",
319 t
->u
.kernel
.target
->name
);
322 ((struct ipt_entry
*)table_base
)->comefrom
325 /* Target might have changed stuff. */
326 ip
= (*pskb
)->nh
.iph
;
327 datalen
= (*pskb
)->len
- ip
->ihl
* 4;
329 if (verdict
== IPT_CONTINUE
)
330 e
= (void *)e
+ e
->next_offset
;
338 e
= (void *)e
+ e
->next_offset
;
342 read_unlock_bh(&table
->lock
);
344 #ifdef DEBUG_ALLOW_ALL
353 /* All zeroes == unconditional rule. */
355 unconditional(const struct ipt_ip
*ip
)
359 for (i
= 0; i
< sizeof(*ip
)/sizeof(__u32
); i
++)
360 if (((__u32
*)ip
)[i
])
366 /* Figures out from what hook each rule can be called: returns 0 if
367 there are loops. Puts hook bitmask in comefrom. */
369 mark_source_chains(struct xt_table_info
*newinfo
,
370 unsigned int valid_hooks
, void *entry0
)
374 /* No recursion; use packet counter to save back ptrs (reset
375 to 0 as we leave), and comefrom to save source hook bitmask */
376 for (hook
= 0; hook
< NF_IP_NUMHOOKS
; hook
++) {
377 unsigned int pos
= newinfo
->hook_entry
[hook
];
379 = (struct ipt_entry
*)(entry0
+ pos
);
381 if (!(valid_hooks
& (1 << hook
)))
384 /* Set initial back pointer. */
385 e
->counters
.pcnt
= pos
;
388 struct ipt_standard_target
*t
389 = (void *)ipt_get_target(e
);
391 if (e
->comefrom
& (1 << NF_IP_NUMHOOKS
)) {
392 printk("iptables: loop hook %u pos %u %08X.\n",
393 hook
, pos
, e
->comefrom
);
397 |= ((1 << hook
) | (1 << NF_IP_NUMHOOKS
));
399 /* Unconditional return/END. */
400 if (e
->target_offset
== sizeof(struct ipt_entry
)
401 && (strcmp(t
->target
.u
.user
.name
,
402 IPT_STANDARD_TARGET
) == 0)
404 && unconditional(&e
->ip
)) {
405 unsigned int oldpos
, size
;
407 /* Return: backtrack through the last
410 e
->comefrom
^= (1<<NF_IP_NUMHOOKS
);
411 #ifdef DEBUG_IP_FIREWALL_USER
413 & (1 << NF_IP_NUMHOOKS
)) {
414 duprintf("Back unset "
421 pos
= e
->counters
.pcnt
;
422 e
->counters
.pcnt
= 0;
424 /* We're at the start. */
428 e
= (struct ipt_entry
*)
430 } while (oldpos
== pos
+ e
->next_offset
);
433 size
= e
->next_offset
;
434 e
= (struct ipt_entry
*)
435 (entry0
+ pos
+ size
);
436 e
->counters
.pcnt
= pos
;
439 int newpos
= t
->verdict
;
441 if (strcmp(t
->target
.u
.user
.name
,
442 IPT_STANDARD_TARGET
) == 0
444 /* This a jump; chase it. */
445 duprintf("Jump rule %u -> %u\n",
448 /* ... this is a fallthru */
449 newpos
= pos
+ e
->next_offset
;
451 e
= (struct ipt_entry
*)
453 e
->counters
.pcnt
= pos
;
458 duprintf("Finished chain %u\n", hook
);
464 cleanup_match(struct ipt_entry_match
*m
, unsigned int *i
)
466 if (i
&& (*i
)-- == 0)
469 if (m
->u
.kernel
.match
->destroy
)
470 m
->u
.kernel
.match
->destroy(m
->u
.kernel
.match
, m
->data
,
471 m
->u
.match_size
- sizeof(*m
));
472 module_put(m
->u
.kernel
.match
->me
);
477 standard_check(const struct ipt_entry_target
*t
,
478 unsigned int max_offset
)
480 struct ipt_standard_target
*targ
= (void *)t
;
482 /* Check standard info. */
483 if (targ
->verdict
>= 0
484 && targ
->verdict
> max_offset
- sizeof(struct ipt_entry
)) {
485 duprintf("ipt_standard_check: bad verdict (%i)\n",
489 if (targ
->verdict
< -NF_MAX_VERDICT
- 1) {
490 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
498 check_match(struct ipt_entry_match
*m
,
500 const struct ipt_ip
*ip
,
501 unsigned int hookmask
,
504 struct ipt_match
*match
;
507 match
= try_then_request_module(xt_find_match(AF_INET
, m
->u
.user
.name
,
509 "ipt_%s", m
->u
.user
.name
);
510 if (IS_ERR(match
) || !match
) {
511 duprintf("check_match: `%s' not found\n", m
->u
.user
.name
);
512 return match
? PTR_ERR(match
) : -ENOENT
;
514 m
->u
.kernel
.match
= match
;
516 ret
= xt_check_match(match
, AF_INET
, m
->u
.match_size
- sizeof(*m
),
517 name
, hookmask
, ip
->proto
,
518 ip
->invflags
& IPT_INV_PROTO
);
522 if (m
->u
.kernel
.match
->checkentry
523 && !m
->u
.kernel
.match
->checkentry(name
, ip
, match
, m
->data
,
524 m
->u
.match_size
- sizeof(*m
),
526 duprintf("ip_tables: check failed for `%s'.\n",
527 m
->u
.kernel
.match
->name
);
535 module_put(m
->u
.kernel
.match
->me
);
539 static struct ipt_target ipt_standard_target
;
542 check_entry(struct ipt_entry
*e
, const char *name
, unsigned int size
,
545 struct ipt_entry_target
*t
;
546 struct ipt_target
*target
;
550 if (!ip_checkentry(&e
->ip
)) {
551 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
556 ret
= IPT_MATCH_ITERATE(e
, check_match
, name
, &e
->ip
, e
->comefrom
, &j
);
558 goto cleanup_matches
;
560 t
= ipt_get_target(e
);
561 target
= try_then_request_module(xt_find_target(AF_INET
,
564 "ipt_%s", t
->u
.user
.name
);
565 if (IS_ERR(target
) || !target
) {
566 duprintf("check_entry: `%s' not found\n", t
->u
.user
.name
);
567 ret
= target
? PTR_ERR(target
) : -ENOENT
;
568 goto cleanup_matches
;
570 t
->u
.kernel
.target
= target
;
572 ret
= xt_check_target(target
, AF_INET
, t
->u
.target_size
- sizeof(*t
),
573 name
, e
->comefrom
, e
->ip
.proto
,
574 e
->ip
.invflags
& IPT_INV_PROTO
);
578 if (t
->u
.kernel
.target
== &ipt_standard_target
) {
579 if (!standard_check(t
, size
)) {
581 goto cleanup_matches
;
583 } else if (t
->u
.kernel
.target
->checkentry
584 && !t
->u
.kernel
.target
->checkentry(name
, e
, target
, t
->data
,
588 duprintf("ip_tables: check failed for `%s'.\n",
589 t
->u
.kernel
.target
->name
);
597 module_put(t
->u
.kernel
.target
->me
);
599 IPT_MATCH_ITERATE(e
, cleanup_match
, &j
);
604 check_entry_size_and_hooks(struct ipt_entry
*e
,
605 struct xt_table_info
*newinfo
,
607 unsigned char *limit
,
608 const unsigned int *hook_entries
,
609 const unsigned int *underflows
,
614 if ((unsigned long)e
% __alignof__(struct ipt_entry
) != 0
615 || (unsigned char *)e
+ sizeof(struct ipt_entry
) >= limit
) {
616 duprintf("Bad offset %p\n", e
);
621 < sizeof(struct ipt_entry
) + sizeof(struct ipt_entry_target
)) {
622 duprintf("checking: element %p size %u\n",
627 /* Check hooks & underflows */
628 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
629 if ((unsigned char *)e
- base
== hook_entries
[h
])
630 newinfo
->hook_entry
[h
] = hook_entries
[h
];
631 if ((unsigned char *)e
- base
== underflows
[h
])
632 newinfo
->underflow
[h
] = underflows
[h
];
635 /* FIXME: underflows must be unconditional, standard verdicts
636 < 0 (not IPT_RETURN). --RR */
638 /* Clear counters and comefrom */
639 e
->counters
= ((struct xt_counters
) { 0, 0 });
647 cleanup_entry(struct ipt_entry
*e
, unsigned int *i
)
649 struct ipt_entry_target
*t
;
651 if (i
&& (*i
)-- == 0)
654 /* Cleanup all matches */
655 IPT_MATCH_ITERATE(e
, cleanup_match
, NULL
);
656 t
= ipt_get_target(e
);
657 if (t
->u
.kernel
.target
->destroy
)
658 t
->u
.kernel
.target
->destroy(t
->u
.kernel
.target
, t
->data
,
659 t
->u
.target_size
- sizeof(*t
));
660 module_put(t
->u
.kernel
.target
->me
);
664 /* Checks and translates the user-supplied table segment (held in
667 translate_table(const char *name
,
668 unsigned int valid_hooks
,
669 struct xt_table_info
*newinfo
,
673 const unsigned int *hook_entries
,
674 const unsigned int *underflows
)
679 newinfo
->size
= size
;
680 newinfo
->number
= number
;
682 /* Init all hooks to impossible value. */
683 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
684 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
685 newinfo
->underflow
[i
] = 0xFFFFFFFF;
688 duprintf("translate_table: size %u\n", newinfo
->size
);
690 /* Walk through entries, checking offsets. */
691 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
692 check_entry_size_and_hooks
,
696 hook_entries
, underflows
, &i
);
701 duprintf("translate_table: %u not %u entries\n",
706 /* Check hooks all assigned */
707 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
708 /* Only hooks which are valid */
709 if (!(valid_hooks
& (1 << i
)))
711 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
712 duprintf("Invalid hook entry %u %u\n",
716 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
717 duprintf("Invalid underflow %u %u\n",
723 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
726 /* Finally, each sanity check must pass */
728 ret
= IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
729 check_entry
, name
, size
, &i
);
732 IPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
737 /* And one copy for every other CPU */
738 for_each_possible_cpu(i
) {
739 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
740 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
748 add_entry_to_counter(const struct ipt_entry
*e
,
749 struct xt_counters total
[],
752 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
759 set_entry_to_counter(const struct ipt_entry
*e
,
760 struct ipt_counters total
[],
763 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
770 get_counters(const struct xt_table_info
*t
,
771 struct xt_counters counters
[])
777 /* Instead of clearing (by a previous call to memset())
778 * the counters and using adds, we set the counters
779 * with data used by 'current' CPU
780 * We dont care about preemption here.
782 curcpu
= raw_smp_processor_id();
785 IPT_ENTRY_ITERATE(t
->entries
[curcpu
],
787 set_entry_to_counter
,
791 for_each_possible_cpu(cpu
) {
795 IPT_ENTRY_ITERATE(t
->entries
[cpu
],
797 add_entry_to_counter
,
803 static inline struct xt_counters
* alloc_counters(struct ipt_table
*table
)
805 unsigned int countersize
;
806 struct xt_counters
*counters
;
807 struct xt_table_info
*private = table
->private;
809 /* We need atomic snapshot of counters: rest doesn't change
810 (other than comefrom, which userspace doesn't care
812 countersize
= sizeof(struct xt_counters
) * private->number
;
813 counters
= vmalloc_node(countersize
, numa_node_id());
815 if (counters
== NULL
)
816 return ERR_PTR(-ENOMEM
);
818 /* First, sum counters... */
819 write_lock_bh(&table
->lock
);
820 get_counters(private, counters
);
821 write_unlock_bh(&table
->lock
);
827 copy_entries_to_user(unsigned int total_size
,
828 struct ipt_table
*table
,
829 void __user
*userptr
)
831 unsigned int off
, num
;
833 struct xt_counters
*counters
;
834 struct xt_table_info
*private = table
->private;
838 counters
= alloc_counters(table
);
839 if (IS_ERR(counters
))
840 return PTR_ERR(counters
);
842 /* choose the copy that is on our node/cpu, ...
843 * This choice is lazy (because current thread is
844 * allowed to migrate to another cpu)
846 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
847 /* ... then copy entire thing ... */
848 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
853 /* FIXME: use iterator macros --RR */
854 /* ... then go back and fix counters and names */
855 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
857 struct ipt_entry_match
*m
;
858 struct ipt_entry_target
*t
;
860 e
= (struct ipt_entry
*)(loc_cpu_entry
+ off
);
861 if (copy_to_user(userptr
+ off
862 + offsetof(struct ipt_entry
, counters
),
864 sizeof(counters
[num
])) != 0) {
869 for (i
= sizeof(struct ipt_entry
);
870 i
< e
->target_offset
;
871 i
+= m
->u
.match_size
) {
874 if (copy_to_user(userptr
+ off
+ i
875 + offsetof(struct ipt_entry_match
,
877 m
->u
.kernel
.match
->name
,
878 strlen(m
->u
.kernel
.match
->name
)+1)
885 t
= ipt_get_target(e
);
886 if (copy_to_user(userptr
+ off
+ e
->target_offset
887 + offsetof(struct ipt_entry_target
,
889 t
->u
.kernel
.target
->name
,
890 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
902 struct compat_delta
{
903 struct compat_delta
*next
;
908 static struct compat_delta
*compat_offsets
= NULL
;
910 static int compat_add_offset(u_int16_t offset
, short delta
)
912 struct compat_delta
*tmp
;
914 tmp
= kmalloc(sizeof(struct compat_delta
), GFP_KERNEL
);
917 tmp
->offset
= offset
;
919 if (compat_offsets
) {
920 tmp
->next
= compat_offsets
->next
;
921 compat_offsets
->next
= tmp
;
923 compat_offsets
= tmp
;
929 static void compat_flush_offsets(void)
931 struct compat_delta
*tmp
, *next
;
933 if (compat_offsets
) {
934 for(tmp
= compat_offsets
; tmp
; tmp
= next
) {
938 compat_offsets
= NULL
;
942 static short compat_calc_jump(u_int16_t offset
)
944 struct compat_delta
*tmp
;
947 for(tmp
= compat_offsets
, delta
= 0; tmp
; tmp
= tmp
->next
)
948 if (tmp
->offset
< offset
)
953 struct compat_ipt_standard_target
955 struct compat_xt_entry_target target
;
956 compat_int_t verdict
;
959 struct compat_ipt_standard
961 struct compat_ipt_entry entry
;
962 struct compat_ipt_standard_target target
;
965 #define IPT_ST_LEN XT_ALIGN(sizeof(struct ipt_standard_target))
966 #define IPT_ST_COMPAT_LEN COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
967 #define IPT_ST_OFFSET (IPT_ST_LEN - IPT_ST_COMPAT_LEN)
969 static int compat_ipt_standard_fn(void *target
,
970 void **dstptr
, int *size
, int convert
)
972 struct compat_ipt_standard_target compat_st
, *pcompat_st
;
973 struct ipt_standard_target st
, *pst
;
980 memcpy(&compat_st
.target
, &pst
->target
,
981 sizeof(compat_st
.target
));
982 compat_st
.verdict
= pst
->verdict
;
983 if (compat_st
.verdict
> 0)
985 compat_calc_jump(compat_st
.verdict
);
986 compat_st
.target
.u
.user
.target_size
= IPT_ST_COMPAT_LEN
;
987 if (copy_to_user(*dstptr
, &compat_st
, IPT_ST_COMPAT_LEN
))
989 *size
-= IPT_ST_OFFSET
;
990 *dstptr
+= IPT_ST_COMPAT_LEN
;
992 case COMPAT_FROM_USER
:
994 memcpy(&st
.target
, &pcompat_st
->target
, IPT_ST_COMPAT_LEN
);
995 st
.verdict
= pcompat_st
->verdict
;
997 st
.verdict
+= compat_calc_jump(st
.verdict
);
998 st
.target
.u
.user
.target_size
= IPT_ST_LEN
;
999 memcpy(*dstptr
, &st
, IPT_ST_LEN
);
1000 *size
+= IPT_ST_OFFSET
;
1001 *dstptr
+= IPT_ST_LEN
;
1003 case COMPAT_CALC_SIZE
:
1004 *size
+= IPT_ST_OFFSET
;
1014 compat_calc_match(struct ipt_entry_match
*m
, int * size
)
1016 if (m
->u
.kernel
.match
->compat
)
1017 m
->u
.kernel
.match
->compat(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1019 xt_compat_match(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1023 static int compat_calc_entry(struct ipt_entry
*e
, struct xt_table_info
*info
,
1024 void *base
, struct xt_table_info
*newinfo
)
1026 struct ipt_entry_target
*t
;
1027 u_int16_t entry_offset
;
1031 entry_offset
= (void *)e
- base
;
1032 IPT_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1033 t
= ipt_get_target(e
);
1034 if (t
->u
.kernel
.target
->compat
)
1035 t
->u
.kernel
.target
->compat(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1037 xt_compat_target(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1038 newinfo
->size
-= off
;
1039 ret
= compat_add_offset(entry_offset
, off
);
1043 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1044 if (info
->hook_entry
[i
] && (e
< (struct ipt_entry
*)
1045 (base
+ info
->hook_entry
[i
])))
1046 newinfo
->hook_entry
[i
] -= off
;
1047 if (info
->underflow
[i
] && (e
< (struct ipt_entry
*)
1048 (base
+ info
->underflow
[i
])))
1049 newinfo
->underflow
[i
] -= off
;
1054 static int compat_table_info(struct xt_table_info
*info
,
1055 struct xt_table_info
*newinfo
)
1057 void *loc_cpu_entry
;
1060 if (!newinfo
|| !info
)
1063 memset(newinfo
, 0, sizeof(struct xt_table_info
));
1064 newinfo
->size
= info
->size
;
1065 newinfo
->number
= info
->number
;
1066 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1067 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1068 newinfo
->underflow
[i
] = info
->underflow
[i
];
1070 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1071 return IPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
1072 compat_calc_entry
, info
, loc_cpu_entry
, newinfo
);
1076 static int get_info(void __user
*user
, int *len
, int compat
)
1078 char name
[IPT_TABLE_MAXNAMELEN
];
1079 struct ipt_table
*t
;
1082 if (*len
!= sizeof(struct ipt_getinfo
)) {
1083 duprintf("length %u != %u\n", *len
,
1084 (unsigned int)sizeof(struct ipt_getinfo
));
1088 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1091 name
[IPT_TABLE_MAXNAMELEN
-1] = '\0';
1092 #ifdef CONFIG_COMPAT
1094 xt_compat_lock(AF_INET
);
1096 t
= try_then_request_module(xt_find_table_lock(AF_INET
, name
),
1097 "iptable_%s", name
);
1098 if (t
&& !IS_ERR(t
)) {
1099 struct ipt_getinfo info
;
1100 struct xt_table_info
*private = t
->private;
1102 #ifdef CONFIG_COMPAT
1104 struct xt_table_info tmp
;
1105 ret
= compat_table_info(private, &tmp
);
1106 compat_flush_offsets();
1110 info
.valid_hooks
= t
->valid_hooks
;
1111 memcpy(info
.hook_entry
, private->hook_entry
,
1112 sizeof(info
.hook_entry
));
1113 memcpy(info
.underflow
, private->underflow
,
1114 sizeof(info
.underflow
));
1115 info
.num_entries
= private->number
;
1116 info
.size
= private->size
;
1117 strcpy(info
.name
, name
);
1119 if (copy_to_user(user
, &info
, *len
) != 0)
1127 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1128 #ifdef CONFIG_COMPAT
1130 xt_compat_unlock(AF_INET
);
1136 get_entries(struct ipt_get_entries __user
*uptr
, int *len
)
1139 struct ipt_get_entries get
;
1140 struct ipt_table
*t
;
1142 if (*len
< sizeof(get
)) {
1143 duprintf("get_entries: %u < %d\n", *len
,
1144 (unsigned int)sizeof(get
));
1147 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1149 if (*len
!= sizeof(struct ipt_get_entries
) + get
.size
) {
1150 duprintf("get_entries: %u != %u\n", *len
,
1151 (unsigned int)(sizeof(struct ipt_get_entries
) +
1156 t
= xt_find_table_lock(AF_INET
, get
.name
);
1157 if (t
&& !IS_ERR(t
)) {
1158 struct xt_table_info
*private = t
->private;
1159 duprintf("t->private->number = %u\n",
1161 if (get
.size
== private->size
)
1162 ret
= copy_entries_to_user(private->size
,
1163 t
, uptr
->entrytable
);
1165 duprintf("get_entries: I've got %u not %u!\n",
1173 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1179 __do_replace(const char *name
, unsigned int valid_hooks
,
1180 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1181 void __user
*counters_ptr
)
1184 struct ipt_table
*t
;
1185 struct xt_table_info
*oldinfo
;
1186 struct xt_counters
*counters
;
1187 void *loc_cpu_old_entry
;
1190 counters
= vmalloc(num_counters
* sizeof(struct xt_counters
));
1196 t
= try_then_request_module(xt_find_table_lock(AF_INET
, name
),
1197 "iptable_%s", name
);
1198 if (!t
|| IS_ERR(t
)) {
1199 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1200 goto free_newinfo_counters_untrans
;
1204 if (valid_hooks
!= t
->valid_hooks
) {
1205 duprintf("Valid hook crap: %08X vs %08X\n",
1206 valid_hooks
, t
->valid_hooks
);
1211 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1215 /* Update module usage count based on number of rules */
1216 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1217 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1218 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1219 (newinfo
->number
<= oldinfo
->initial_entries
))
1221 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1222 (newinfo
->number
<= oldinfo
->initial_entries
))
1225 /* Get the old counters. */
1226 get_counters(oldinfo
, counters
);
1227 /* Decrease module usage counts and free resource */
1228 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1229 IPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,NULL
);
1230 xt_free_table_info(oldinfo
);
1231 if (copy_to_user(counters_ptr
, counters
,
1232 sizeof(struct xt_counters
) * num_counters
) != 0)
1241 free_newinfo_counters_untrans
:
1248 do_replace(void __user
*user
, unsigned int len
)
1251 struct ipt_replace tmp
;
1252 struct xt_table_info
*newinfo
;
1253 void *loc_cpu_entry
;
1255 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1258 /* Hack: Causes ipchains to give correct error msg --RR */
1259 if (len
!= sizeof(tmp
) + tmp
.size
)
1260 return -ENOPROTOOPT
;
1262 /* overflow check */
1263 if (tmp
.size
>= (INT_MAX
- sizeof(struct xt_table_info
)) / NR_CPUS
-
1266 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1269 newinfo
= xt_alloc_table_info(tmp
.size
);
1273 /* choose the copy that is our node/cpu */
1274 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1275 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1281 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1282 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1283 tmp
.hook_entry
, tmp
.underflow
);
1287 duprintf("ip_tables: Translated table\n");
1289 ret
= __do_replace(tmp
.name
, tmp
.valid_hooks
,
1290 newinfo
, tmp
.num_counters
,
1293 goto free_newinfo_untrans
;
1296 free_newinfo_untrans
:
1297 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
,NULL
);
1299 xt_free_table_info(newinfo
);
1303 /* We're lazy, and add to the first CPU; overflow works its fey magic
1304 * and everything is OK. */
1306 add_counter_to_entry(struct ipt_entry
*e
,
1307 const struct xt_counters addme
[],
1311 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1313 (long unsigned int)e
->counters
.pcnt
,
1314 (long unsigned int)e
->counters
.bcnt
,
1315 (long unsigned int)addme
[*i
].pcnt
,
1316 (long unsigned int)addme
[*i
].bcnt
);
1319 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1326 do_add_counters(void __user
*user
, unsigned int len
, int compat
)
1329 struct xt_counters_info tmp
;
1330 struct xt_counters
*paddc
;
1331 unsigned int num_counters
;
1335 struct ipt_table
*t
;
1336 struct xt_table_info
*private;
1338 void *loc_cpu_entry
;
1339 #ifdef CONFIG_COMPAT
1340 struct compat_xt_counters_info compat_tmp
;
1344 size
= sizeof(struct compat_xt_counters_info
);
1349 size
= sizeof(struct xt_counters_info
);
1352 if (copy_from_user(ptmp
, user
, size
) != 0)
1355 #ifdef CONFIG_COMPAT
1357 num_counters
= compat_tmp
.num_counters
;
1358 name
= compat_tmp
.name
;
1362 num_counters
= tmp
.num_counters
;
1366 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1369 paddc
= vmalloc_node(len
- size
, numa_node_id());
1373 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1378 t
= xt_find_table_lock(AF_INET
, name
);
1379 if (!t
|| IS_ERR(t
)) {
1380 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1384 write_lock_bh(&t
->lock
);
1385 private = t
->private;
1386 if (private->number
!= num_counters
) {
1388 goto unlock_up_free
;
1392 /* Choose the copy that is on our node */
1393 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1394 IPT_ENTRY_ITERATE(loc_cpu_entry
,
1396 add_counter_to_entry
,
1400 write_unlock_bh(&t
->lock
);
1409 #ifdef CONFIG_COMPAT
1410 struct compat_ipt_replace
{
1411 char name
[IPT_TABLE_MAXNAMELEN
];
1415 u32 hook_entry
[NF_IP_NUMHOOKS
];
1416 u32 underflow
[NF_IP_NUMHOOKS
];
1418 compat_uptr_t counters
; /* struct ipt_counters * */
1419 struct compat_ipt_entry entries
[0];
1422 static inline int compat_copy_match_to_user(struct ipt_entry_match
*m
,
1423 void __user
**dstptr
, compat_uint_t
*size
)
1425 if (m
->u
.kernel
.match
->compat
)
1426 return m
->u
.kernel
.match
->compat(m
, dstptr
, size
,
1429 return xt_compat_match(m
, dstptr
, size
, COMPAT_TO_USER
);
1432 static int compat_copy_entry_to_user(struct ipt_entry
*e
,
1433 void __user
**dstptr
, compat_uint_t
*size
)
1435 struct ipt_entry_target __user
*t
;
1436 struct compat_ipt_entry __user
*ce
;
1437 u_int16_t target_offset
, next_offset
;
1438 compat_uint_t origsize
;
1443 ce
= (struct compat_ipt_entry __user
*)*dstptr
;
1444 if (copy_to_user(ce
, e
, sizeof(struct ipt_entry
)))
1447 *dstptr
+= sizeof(struct compat_ipt_entry
);
1448 ret
= IPT_MATCH_ITERATE(e
, compat_copy_match_to_user
, dstptr
, size
);
1449 target_offset
= e
->target_offset
- (origsize
- *size
);
1452 t
= ipt_get_target(e
);
1453 if (t
->u
.kernel
.target
->compat
)
1454 ret
= t
->u
.kernel
.target
->compat(t
, dstptr
, size
,
1457 ret
= xt_compat_target(t
, dstptr
, size
, COMPAT_TO_USER
);
1461 next_offset
= e
->next_offset
- (origsize
- *size
);
1462 if (put_user(target_offset
, &ce
->target_offset
))
1464 if (put_user(next_offset
, &ce
->next_offset
))
1472 compat_check_calc_match(struct ipt_entry_match
*m
,
1474 const struct ipt_ip
*ip
,
1475 unsigned int hookmask
,
1478 struct ipt_match
*match
;
1480 match
= try_then_request_module(xt_find_match(AF_INET
, m
->u
.user
.name
,
1481 m
->u
.user
.revision
),
1482 "ipt_%s", m
->u
.user
.name
);
1483 if (IS_ERR(match
) || !match
) {
1484 duprintf("compat_check_calc_match: `%s' not found\n",
1486 return match
? PTR_ERR(match
) : -ENOENT
;
1488 m
->u
.kernel
.match
= match
;
1490 if (m
->u
.kernel
.match
->compat
)
1491 m
->u
.kernel
.match
->compat(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1493 xt_compat_match(m
, NULL
, size
, COMPAT_CALC_SIZE
);
1500 check_compat_entry_size_and_hooks(struct ipt_entry
*e
,
1501 struct xt_table_info
*newinfo
,
1503 unsigned char *base
,
1504 unsigned char *limit
,
1505 unsigned int *hook_entries
,
1506 unsigned int *underflows
,
1510 struct ipt_entry_target
*t
;
1511 struct ipt_target
*target
;
1512 u_int16_t entry_offset
;
1515 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1516 if ((unsigned long)e
% __alignof__(struct compat_ipt_entry
) != 0
1517 || (unsigned char *)e
+ sizeof(struct compat_ipt_entry
) >= limit
) {
1518 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1522 if (e
->next_offset
< sizeof(struct compat_ipt_entry
) +
1523 sizeof(struct compat_xt_entry_target
)) {
1524 duprintf("checking: element %p size %u\n",
1529 if (!ip_checkentry(&e
->ip
)) {
1530 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
1535 entry_offset
= (void *)e
- (void *)base
;
1537 ret
= IPT_MATCH_ITERATE(e
, compat_check_calc_match
, name
, &e
->ip
,
1538 e
->comefrom
, &off
, &j
);
1542 t
= ipt_get_target(e
);
1543 target
= try_then_request_module(xt_find_target(AF_INET
,
1545 t
->u
.user
.revision
),
1546 "ipt_%s", t
->u
.user
.name
);
1547 if (IS_ERR(target
) || !target
) {
1548 duprintf("check_entry: `%s' not found\n", t
->u
.user
.name
);
1549 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1552 t
->u
.kernel
.target
= target
;
1554 if (t
->u
.kernel
.target
->compat
)
1555 t
->u
.kernel
.target
->compat(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1557 xt_compat_target(t
, NULL
, &off
, COMPAT_CALC_SIZE
);
1559 ret
= compat_add_offset(entry_offset
, off
);
1563 /* Check hooks & underflows */
1564 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
1565 if ((unsigned char *)e
- base
== hook_entries
[h
])
1566 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1567 if ((unsigned char *)e
- base
== underflows
[h
])
1568 newinfo
->underflow
[h
] = underflows
[h
];
1571 /* Clear counters and comefrom */
1572 e
->counters
= ((struct ipt_counters
) { 0, 0 });
1578 IPT_MATCH_ITERATE(e
, cleanup_match
, &j
);
1582 static inline int compat_copy_match_from_user(struct ipt_entry_match
*m
,
1583 void **dstptr
, compat_uint_t
*size
, const char *name
,
1584 const struct ipt_ip
*ip
, unsigned int hookmask
)
1586 struct ipt_entry_match
*dm
;
1587 struct ipt_match
*match
;
1590 dm
= (struct ipt_entry_match
*)*dstptr
;
1591 match
= m
->u
.kernel
.match
;
1593 match
->compat(m
, dstptr
, size
, COMPAT_FROM_USER
);
1595 xt_compat_match(m
, dstptr
, size
, COMPAT_FROM_USER
);
1597 ret
= xt_check_match(match
, AF_INET
, dm
->u
.match_size
- sizeof(*dm
),
1598 name
, hookmask
, ip
->proto
,
1599 ip
->invflags
& IPT_INV_PROTO
);
1603 if (m
->u
.kernel
.match
->checkentry
1604 && !m
->u
.kernel
.match
->checkentry(name
, ip
, match
, dm
->data
,
1605 dm
->u
.match_size
- sizeof(*dm
),
1607 duprintf("ip_tables: check failed for `%s'.\n",
1608 m
->u
.kernel
.match
->name
);
1614 static int compat_copy_entry_from_user(struct ipt_entry
*e
, void **dstptr
,
1615 unsigned int *size
, const char *name
,
1616 struct xt_table_info
*newinfo
, unsigned char *base
)
1618 struct ipt_entry_target
*t
;
1619 struct ipt_target
*target
;
1620 struct ipt_entry
*de
;
1621 unsigned int origsize
;
1626 de
= (struct ipt_entry
*)*dstptr
;
1627 memcpy(de
, e
, sizeof(struct ipt_entry
));
1629 *dstptr
+= sizeof(struct compat_ipt_entry
);
1630 ret
= IPT_MATCH_ITERATE(e
, compat_copy_match_from_user
, dstptr
, size
,
1631 name
, &de
->ip
, de
->comefrom
);
1634 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1635 t
= ipt_get_target(e
);
1636 target
= t
->u
.kernel
.target
;
1638 target
->compat(t
, dstptr
, size
, COMPAT_FROM_USER
);
1640 xt_compat_target(t
, dstptr
, size
, COMPAT_FROM_USER
);
1642 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1643 for (h
= 0; h
< NF_IP_NUMHOOKS
; h
++) {
1644 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1645 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1646 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1647 newinfo
->underflow
[h
] -= origsize
- *size
;
1650 t
= ipt_get_target(de
);
1651 target
= t
->u
.kernel
.target
;
1652 ret
= xt_check_target(target
, AF_INET
, t
->u
.target_size
- sizeof(*t
),
1653 name
, e
->comefrom
, e
->ip
.proto
,
1654 e
->ip
.invflags
& IPT_INV_PROTO
);
1659 if (t
->u
.kernel
.target
== &ipt_standard_target
) {
1660 if (!standard_check(t
, *size
))
1662 } else if (t
->u
.kernel
.target
->checkentry
1663 && !t
->u
.kernel
.target
->checkentry(name
, de
, target
,
1664 t
->data
, t
->u
.target_size
- sizeof(*t
),
1666 duprintf("ip_tables: compat: check failed for `%s'.\n",
1667 t
->u
.kernel
.target
->name
);
1676 translate_compat_table(const char *name
,
1677 unsigned int valid_hooks
,
1678 struct xt_table_info
**pinfo
,
1680 unsigned int total_size
,
1681 unsigned int number
,
1682 unsigned int *hook_entries
,
1683 unsigned int *underflows
)
1686 struct xt_table_info
*newinfo
, *info
;
1687 void *pos
, *entry0
, *entry1
;
1694 info
->number
= number
;
1696 /* Init all hooks to impossible value. */
1697 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1698 info
->hook_entry
[i
] = 0xFFFFFFFF;
1699 info
->underflow
[i
] = 0xFFFFFFFF;
1702 duprintf("translate_compat_table: size %u\n", info
->size
);
1704 xt_compat_lock(AF_INET
);
1705 /* Walk through entries, checking offsets. */
1706 ret
= IPT_ENTRY_ITERATE(entry0
, total_size
,
1707 check_compat_entry_size_and_hooks
,
1708 info
, &size
, entry0
,
1709 entry0
+ total_size
,
1710 hook_entries
, underflows
, &i
, name
);
1716 duprintf("translate_compat_table: %u not %u entries\n",
1721 /* Check hooks all assigned */
1722 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1723 /* Only hooks which are valid */
1724 if (!(valid_hooks
& (1 << i
)))
1726 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1727 duprintf("Invalid hook entry %u %u\n",
1728 i
, hook_entries
[i
]);
1731 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1732 duprintf("Invalid underflow %u %u\n",
1739 newinfo
= xt_alloc_table_info(size
);
1743 newinfo
->number
= number
;
1744 for (i
= 0; i
< NF_IP_NUMHOOKS
; i
++) {
1745 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1746 newinfo
->underflow
[i
] = info
->underflow
[i
];
1748 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1751 ret
= IPT_ENTRY_ITERATE(entry0
, total_size
,
1752 compat_copy_entry_from_user
, &pos
, &size
,
1753 name
, newinfo
, entry1
);
1754 compat_flush_offsets();
1755 xt_compat_unlock(AF_INET
);
1760 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1763 /* And one copy for every other CPU */
1764 for_each_possible_cpu(i
)
1765 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1766 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1770 xt_free_table_info(info
);
1774 xt_free_table_info(newinfo
);
1778 xt_compat_unlock(AF_INET
);
1783 compat_do_replace(void __user
*user
, unsigned int len
)
1786 struct compat_ipt_replace tmp
;
1787 struct xt_table_info
*newinfo
;
1788 void *loc_cpu_entry
;
1790 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1793 /* Hack: Causes ipchains to give correct error msg --RR */
1794 if (len
!= sizeof(tmp
) + tmp
.size
)
1795 return -ENOPROTOOPT
;
1797 /* overflow check */
1798 if (tmp
.size
>= (INT_MAX
- sizeof(struct xt_table_info
)) / NR_CPUS
-
1801 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1804 newinfo
= xt_alloc_table_info(tmp
.size
);
1808 /* choose the copy that is our node/cpu */
1809 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1810 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1816 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1817 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1818 tmp
.num_entries
, tmp
.hook_entry
, tmp
.underflow
);
1822 duprintf("compat_do_replace: Translated table\n");
1824 ret
= __do_replace(tmp
.name
, tmp
.valid_hooks
,
1825 newinfo
, tmp
.num_counters
,
1826 compat_ptr(tmp
.counters
));
1828 goto free_newinfo_untrans
;
1831 free_newinfo_untrans
:
1832 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
,NULL
);
1834 xt_free_table_info(newinfo
);
1839 compat_do_ipt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1844 if (!capable(CAP_NET_ADMIN
))
1848 case IPT_SO_SET_REPLACE
:
1849 ret
= compat_do_replace(user
, len
);
1852 case IPT_SO_SET_ADD_COUNTERS
:
1853 ret
= do_add_counters(user
, len
, 1);
1857 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd
);
1864 struct compat_ipt_get_entries
1866 char name
[IPT_TABLE_MAXNAMELEN
];
1868 struct compat_ipt_entry entrytable
[0];
1871 static int compat_copy_entries_to_user(unsigned int total_size
,
1872 struct ipt_table
*table
, void __user
*userptr
)
1874 unsigned int off
, num
;
1875 struct compat_ipt_entry e
;
1876 struct xt_counters
*counters
;
1877 struct xt_table_info
*private = table
->private;
1881 void *loc_cpu_entry
;
1883 counters
= alloc_counters(table
);
1884 if (IS_ERR(counters
))
1885 return PTR_ERR(counters
);
1887 /* choose the copy that is on our node/cpu, ...
1888 * This choice is lazy (because current thread is
1889 * allowed to migrate to another cpu)
1891 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1894 ret
= IPT_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1895 compat_copy_entry_to_user
, &pos
, &size
);
1899 /* ... then go back and fix counters and names */
1900 for (off
= 0, num
= 0; off
< size
; off
+= e
.next_offset
, num
++) {
1902 struct ipt_entry_match m
;
1903 struct ipt_entry_target t
;
1906 if (copy_from_user(&e
, userptr
+ off
,
1907 sizeof(struct compat_ipt_entry
)))
1909 if (copy_to_user(userptr
+ off
+
1910 offsetof(struct compat_ipt_entry
, counters
),
1911 &counters
[num
], sizeof(counters
[num
])))
1914 for (i
= sizeof(struct compat_ipt_entry
);
1915 i
< e
.target_offset
; i
+= m
.u
.match_size
) {
1916 if (copy_from_user(&m
, userptr
+ off
+ i
,
1917 sizeof(struct ipt_entry_match
)))
1919 if (copy_to_user(userptr
+ off
+ i
+
1920 offsetof(struct ipt_entry_match
, u
.user
.name
),
1921 m
.u
.kernel
.match
->name
,
1922 strlen(m
.u
.kernel
.match
->name
) + 1))
1926 if (copy_from_user(&t
, userptr
+ off
+ e
.target_offset
,
1927 sizeof(struct ipt_entry_target
)))
1929 if (copy_to_user(userptr
+ off
+ e
.target_offset
+
1930 offsetof(struct ipt_entry_target
, u
.user
.name
),
1931 t
.u
.kernel
.target
->name
,
1932 strlen(t
.u
.kernel
.target
->name
) + 1))
1942 compat_get_entries(struct compat_ipt_get_entries __user
*uptr
, int *len
)
1945 struct compat_ipt_get_entries get
;
1946 struct ipt_table
*t
;
1949 if (*len
< sizeof(get
)) {
1950 duprintf("compat_get_entries: %u < %u\n",
1951 *len
, (unsigned int)sizeof(get
));
1955 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1958 if (*len
!= sizeof(struct compat_ipt_get_entries
) + get
.size
) {
1959 duprintf("compat_get_entries: %u != %u\n", *len
,
1960 (unsigned int)(sizeof(struct compat_ipt_get_entries
) +
1965 xt_compat_lock(AF_INET
);
1966 t
= xt_find_table_lock(AF_INET
, get
.name
);
1967 if (t
&& !IS_ERR(t
)) {
1968 struct xt_table_info
*private = t
->private;
1969 struct xt_table_info info
;
1970 duprintf("t->private->number = %u\n",
1972 ret
= compat_table_info(private, &info
);
1973 if (!ret
&& get
.size
== info
.size
) {
1974 ret
= compat_copy_entries_to_user(private->size
,
1975 t
, uptr
->entrytable
);
1977 duprintf("compat_get_entries: I've got %u not %u!\n",
1982 compat_flush_offsets();
1986 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1988 xt_compat_unlock(AF_INET
);
1993 compat_do_ipt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1998 case IPT_SO_GET_INFO
:
1999 ret
= get_info(user
, len
, 1);
2001 case IPT_SO_GET_ENTRIES
:
2002 ret
= compat_get_entries(user
, len
);
2005 duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd
);
2013 do_ipt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2017 if (!capable(CAP_NET_ADMIN
))
2021 case IPT_SO_SET_REPLACE
:
2022 ret
= do_replace(user
, len
);
2025 case IPT_SO_SET_ADD_COUNTERS
:
2026 ret
= do_add_counters(user
, len
, 0);
2030 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd
);
2038 do_ipt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2042 if (!capable(CAP_NET_ADMIN
))
2046 case IPT_SO_GET_INFO
:
2047 ret
= get_info(user
, len
, 0);
2050 case IPT_SO_GET_ENTRIES
:
2051 ret
= get_entries(user
, len
);
2054 case IPT_SO_GET_REVISION_MATCH
:
2055 case IPT_SO_GET_REVISION_TARGET
: {
2056 struct ipt_get_revision rev
;
2059 if (*len
!= sizeof(rev
)) {
2063 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2068 if (cmd
== IPT_SO_GET_REVISION_TARGET
)
2073 try_then_request_module(xt_find_revision(AF_INET
, rev
.name
,
2076 "ipt_%s", rev
.name
);
2081 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd
);
2088 int ipt_register_table(struct xt_table
*table
, const struct ipt_replace
*repl
)
2091 struct xt_table_info
*newinfo
;
2092 static struct xt_table_info bootstrap
2093 = { 0, 0, 0, { 0 }, { 0 }, { } };
2094 void *loc_cpu_entry
;
2096 newinfo
= xt_alloc_table_info(repl
->size
);
2100 /* choose the copy on our node/cpu
2101 * but dont care of preemption
2103 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2104 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2106 ret
= translate_table(table
->name
, table
->valid_hooks
,
2107 newinfo
, loc_cpu_entry
, repl
->size
,
2112 xt_free_table_info(newinfo
);
2116 ret
= xt_register_table(table
, &bootstrap
, newinfo
);
2118 xt_free_table_info(newinfo
);
2125 void ipt_unregister_table(struct ipt_table
*table
)
2127 struct xt_table_info
*private;
2128 void *loc_cpu_entry
;
2130 private = xt_unregister_table(table
);
2132 /* Decrease module usage counts and free resources */
2133 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2134 IPT_ENTRY_ITERATE(loc_cpu_entry
, private->size
, cleanup_entry
, NULL
);
2135 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	/* test_type == 0xFF is the "any type" wildcard; otherwise the
	 * type must match exactly and the code fall in [min_code,
	 * max_code].  XOR with invert (0 or 1) flips the result for
	 * negated rules. */
	return ((test_type == 0xFF)
		|| (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
2149 icmp_match(const struct sk_buff
*skb
,
2150 const struct net_device
*in
,
2151 const struct net_device
*out
,
2152 const struct xt_match
*match
,
2153 const void *matchinfo
,
2155 unsigned int protoff
,
2158 struct icmphdr _icmph
, *ic
;
2159 const struct ipt_icmp
*icmpinfo
= matchinfo
;
2161 /* Must not be a fragment. */
2165 ic
= skb_header_pointer(skb
, protoff
, sizeof(_icmph
), &_icmph
);
2167 /* We've been asked to examine this packet, and we
2168 * can't. Hence, no choice but to drop.
2170 duprintf("Dropping evil ICMP tinygram.\n");
2175 return icmp_type_code_match(icmpinfo
->type
,
2179 !!(icmpinfo
->invflags
&IPT_ICMP_INV
));
2182 /* Called when user tries to insert an entry of this type. */
2184 icmp_checkentry(const char *tablename
,
2186 const struct xt_match
*match
,
2188 unsigned int matchsize
,
2189 unsigned int hook_mask
)
2191 const struct ipt_icmp
*icmpinfo
= matchinfo
;
2193 /* Must specify no unknown invflags */
2194 return !(icmpinfo
->invflags
& ~IPT_ICMP_INV
);
2197 /* The built-in targets: standard (NULL) and error. */
2198 static struct ipt_target ipt_standard_target
= {
2199 .name
= IPT_STANDARD_TARGET
,
2200 .targetsize
= sizeof(int),
2202 #ifdef CONFIG_COMPAT
2203 .compat
= &compat_ipt_standard_fn
,
2207 static struct ipt_target ipt_error_target
= {
2208 .name
= IPT_ERROR_TARGET
,
2209 .target
= ipt_error
,
2210 .targetsize
= IPT_FUNCTION_MAXNAMELEN
,
2214 static struct nf_sockopt_ops ipt_sockopts
= {
2216 .set_optmin
= IPT_BASE_CTL
,
2217 .set_optmax
= IPT_SO_SET_MAX
+1,
2218 .set
= do_ipt_set_ctl
,
2219 #ifdef CONFIG_COMPAT
2220 .compat_set
= compat_do_ipt_set_ctl
,
2222 .get_optmin
= IPT_BASE_CTL
,
2223 .get_optmax
= IPT_SO_GET_MAX
+1,
2224 .get
= do_ipt_get_ctl
,
2225 #ifdef CONFIG_COMPAT
2226 .compat_get
= compat_do_ipt_get_ctl
,
2230 static struct ipt_match icmp_matchstruct
= {
2232 .match
= icmp_match
,
2233 .matchsize
= sizeof(struct ipt_icmp
),
2234 .proto
= IPPROTO_ICMP
,
2236 .checkentry
= icmp_checkentry
,
2239 static int __init
ip_tables_init(void)
2243 ret
= xt_proto_init(AF_INET
);
2247 /* Noone else will be downing sem now, so we won't sleep */
2248 ret
= xt_register_target(&ipt_standard_target
);
2251 ret
= xt_register_target(&ipt_error_target
);
2254 ret
= xt_register_match(&icmp_matchstruct
);
2258 /* Register setsockopt */
2259 ret
= nf_register_sockopt(&ipt_sockopts
);
2263 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2267 xt_unregister_match(&icmp_matchstruct
);
2269 xt_unregister_target(&ipt_error_target
);
2271 xt_unregister_target(&ipt_standard_target
);
2273 xt_proto_fini(AF_INET
);
2278 static void __exit
ip_tables_fini(void)
2280 nf_unregister_sockopt(&ipt_sockopts
);
2282 xt_unregister_match(&icmp_matchstruct
);
2283 xt_unregister_target(&ipt_error_target
);
2284 xt_unregister_target(&ipt_standard_target
);
2286 xt_proto_fini(AF_INET
);
/* Entry points used by the per-table modules (iptable_filter etc.). */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);