/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
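
/*
 * In outline (a sketch of the pattern used below, not extra code):
 *
 *	packet path:			user context:
 *	xt_info_rdlock_bh();		xt_info_wrlock(cpu);
 *	... match, bump counters ...	... read/replace rules ...
 *	xt_info_rdunlock_bh();		xt_info_wrunlock(cpu);
 *
 * so counter updates stay cheap per cpu while rule replacement and
 * counter reads serialize against each cpu in turn.
 */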
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
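
/*
 * FWINV example: for a rule like "! -s 10.0.0.0/8" userspace stores the
 * address/mask normally and sets IPT_INV_SRCIP; the XOR above then
 * turns a failed address comparison into a match and vice versa.
 */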
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical - called for every packet */
static inline bool
do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
	 struct xt_match_param *par)
{
	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, par))
		return true;
	else
		return false;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
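
/*
 * This relies on struct ipt_ip packing into whole __u32 words with no
 * holes; e.g. the implicit policy rule at the tail of a builtin chain
 * carries no address, interface, protocol or flag qualifiers, so every
 * word is zero and the rule counts as "unconditional".
 */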
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	const struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
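
/*
 * Illustrative output: with the format string above, a traced packet
 * produces a log prefix such as "TRACE: filter:INPUT:policy:2 ",
 * followed by whatever packet dump the registered nf_log backend
 * appends.
 */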
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	mtpar.thoff   = ip_hdrlen(skb);
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV4;
	tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev,
				    &e->ip, mtpar.fragoff)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
				tgpar.target   = t->u.kernel.target;
				tgpar.targinfo = t->data;
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     &tgpar);
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
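
/*
 * Verdict encoding used above (for reference): a standard target's
 * verdict v is either a byte offset to jump to (v >= 0) or an encoded
 * verdict -(NF_xxx + 1), so "verdict = (unsigned)(-v) - 1" recovers
 * NF_ACCEPT/NF_DROP/..., with IPT_RETURN handled separately as the
 * chain-return marker.
 */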
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
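
/*
 * Walk summary (a sketch): starting from each hook entry, the loop
 * chases jumps, borrowing e->counters.pcnt as a back pointer (reset to
 * 0 on the way back out) and ORing (1 << hook) into comefrom for every
 * entry reachable from that hook; re-reaching an entry that still
 * carries the (1 << NF_INET_NUMHOOKS) "in progress" bit is what
 * detects a loop.
 */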
static int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}
static int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static int
check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
	    unsigned int *i)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 par->match->name);
		return ret;
	}
	++*i;
	return 0;
}
static int
find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
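
/*
 * Note on the "j" counter threaded through IPT_MATCH_ITERATE() above:
 * it counts matches whose checkentry succeeded and whose module
 * reference we hold, so on failure cleanup_match() (which decrements
 * and stops at zero) releases exactly the matches acquired so far and
 * no more.
 */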
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
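
/*
 * Blob layout reminder (an illustration): the table is one contiguous
 * run of variable-sized ipt_entry records,
 *
 *	offset 0      hook_entry[h]     underflow[h]
 *	|-- entry --|-- entry --| ... |-- entry --|
 *
 * and hook_entry[]/underflow[] are byte offsets into it, which is why
 * every valid hook must land exactly on an entry boundary before
 * mark_source_chains() is allowed to chase jumps through the blob.
 */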
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
static struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ipt_entry_match *m;
		const struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
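
/*
 * A note on the compat offset machinery above: each native ipt_entry
 * is larger than its 32-bit compat counterpart, so every entry
 * boundary shifts when a table is converted. xt_compat_add_offset()
 * records the accumulated per-entry size delta and
 * xt_compat_calc_jump() looks it up, so that (roughly) a standard
 * verdict v > 0, which is a byte offset into the blob, becomes
 * v + delta in the native layout and v - delta on the way back out.
 */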
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
*net
, void __user
*user
, unsigned int len
)
1267 struct ipt_replace tmp
;
1268 struct xt_table_info
*newinfo
;
1269 void *loc_cpu_entry
;
1271 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1274 /* overflow check */
1275 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1278 newinfo
= xt_alloc_table_info(tmp
.size
);
1282 /* choose the copy that is on our node/cpu */
1283 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1284 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1290 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1291 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1292 tmp
.hook_entry
, tmp
.underflow
);
1296 duprintf("ip_tables: Translated table\n");
1298 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1299 tmp
.num_counters
, tmp
.counters
);
1301 goto free_newinfo_untrans
;
1304 free_newinfo_untrans
:
1305 IPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1307 xt_free_table_info(newinfo
);
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
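
/*
 * Adding to a single CPU's copy is enough: get_counters() above sums
 * every per-cpu copy when userspace reads the counters back, so the
 * increments do not need to be spread across cpus.
 */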
static int
do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
static int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}

static int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}
static int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
				       &e->ip, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
				       dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static int
compat_check_entry(struct ipt_entry *e, const char *name,
		   unsigned int *i)
{
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       check_compat_entry_size_and_hooks,
				       info, &size, entry0,
				       entry0 + total_size,
				       hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
				       compat_copy_entry_from_user,
				       &pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						  compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
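
/*
 * Shape of the conversion (a sketch): pass 1 walks the 32-bit blob in
 * place, validating offsets and summing per-entry size deltas; only
 * then is the native-sized table allocated and pass 2
 * (compat_copy_entry_from_user) re-emits each entry, after which the
 * usual native checks (mark_source_chains, compat_check_entry) run on
 * the converted result.
 */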
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
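
/*
 * Typical caller (a sketch, modelled on the iptable_* table modules):
 * build an initial ipt_replace-compatible blob with one policy rule
 * per covered hook plus a trailing error entry, call
 * ipt_register_table() from the pernet init path, and feed packets to
 * the returned table via ipt_do_table() from the netfilter hook
 * functions.
 */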
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
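
/*
 * Example: "-p icmp --icmp-type echo-request" is stored as test_type 8,
 * min_code 0, max_code 0, so only type 8/code 0 matches; userspace uses
 * test_type 0xFF as the "any type" wildcard, and the invert flag flips
 * the whole result.
 */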
static bool
icmp_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static bool icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
2158 static struct xt_target ipt_standard_target __read_mostly
= {
2159 .name
= IPT_STANDARD_TARGET
,
2160 .targetsize
= sizeof(int),
2162 #ifdef CONFIG_COMPAT
2163 .compatsize
= sizeof(compat_int_t
),
2164 .compat_from_user
= compat_standard_from_user
,
2165 .compat_to_user
= compat_standard_to_user
,
2169 static struct xt_target ipt_error_target __read_mostly
= {
2170 .name
= IPT_ERROR_TARGET
,
2171 .target
= ipt_error
,
2172 .targetsize
= IPT_FUNCTION_MAXNAMELEN
,
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.checkentry	= icmp_checkentry,
	.proto		= IPPROTO_ICMP,
	.family		= NFPROTO_IPV4,
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);