/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical - called for every packet */
static inline bool
do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
/* for const-correctness */
static inline const struct ipt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IPT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
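/*
 * Sketch (illustrative, not from the original file): entries are
 * variable-length blobs laid out back to back, so a table walk is just
 * pointer arithmetic over next_offset:
 *
 *	struct ipt_entry *e = get_entry(table_base, private->hook_entry[hook]);
 *	...
 *	e = ipt_next_entry(e);	(advance to the following rule)
 */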
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
#define tb_comefrom ((struct ipt_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, *back;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		const struct ipt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, mtpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e)
			if (do_match(ematch, skb, &mtpar) != 0)
				goto no_match;

		ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ipt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IPT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ipt_entry *next = ipt_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IPT_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == IPT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
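/*
 * Usage sketch (illustrative, modeled on iptable_filter; not part of
 * this file): table modules call ipt_do_table() from their netfilter
 * hook functions, e.g.:
 *
 *	static unsigned int
 *	ipt_local_in_hook(unsigned int hook, struct sk_buff *skb,
 *			  const struct net_device *in,
 *			  const struct net_device *out,
 *			  int (*okfn)(struct sk_buff *))
 *	{
 *		return ipt_do_table(skb, hook, in, out,
 *				    dev_net(in)->ipv4.iptable_filter);
 *	}
 */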
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ipt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     IPT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
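/*
 * Illustration (assumption, not in the original): for a ruleset like
 *	INPUT:  rule0, jump -> CHAIN1
 *	CHAIN1: rule1, RETURN
 * the walk stores each jump origin in e->counters.pcnt (the counters
 * are zeroed before this pass runs, so the field is free), follows the
 * jump, and on an unconditional RETURN backtracks through the saved
 * pcnt values until it reaches the hook entry point again.
 */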
static void cleanup_match(struct ipt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct ipt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct ipt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
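/*
 * Example (follows from the encoding used above): userspace stores a
 * builtin verdict v as -v - 1, so NF_DROP (0) arrives as -1 and
 * NF_ACCEPT (1) as -2; check_underflow() recovers v with
 * verdict = -verdict - 1 and accepts only ACCEPT/DROP policies.
 */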
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i; /* macro does multi eval of i */
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
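/*
 * Illustration (not part of the original): native ipt_entry blobs are
 * larger than their compat counterparts, so a jump offset into the
 * blob shrinks on the 32-bit side.  xt_compat_calc_jump() returns the
 * accumulated size delta of all entries before the jump target, which
 * keeps the two conversions above symmetric:
 *	to user:   cv -= delta;
 *	from user: v  += delta;
 */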
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct ipt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		ret = try_then_request_module(xt_find_revision(AF_INET,
							       rev.name,
							       rev.revision,
							       target, &ret),
					      "ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
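/*
 * Example (illustrative): "--icmp-type 8/0" yields test_type == 8 and
 * min_code == max_code == 0, so an echo request (type 8, code 0)
 * matches; test_type == 0xFF matches any ICMP type, and the invert
 * argument flips the result for "! --icmp-type" rules.
 */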
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV4,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);