/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 * 	- increase module usage count as soon as we have rules inside
 * 	  a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 * 	- Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
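	/* FWINV: XORing the raw test with the rule's IPT_INV_* bit means a
	 * set invert flag flips the result, so the normal and inverted
	 * forms of every check share one code path. */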

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}

static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}

static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo,
	  void *userinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}

static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, skb->nh.iph->ihl*4, hotdrop))
		return 1;
	else
		return 0;
}

static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table,
	     void *userdata)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. the tcp syn flag or ports.  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
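	/* "back" is the entry to resume at when a RETURN is hit in this
	 * chain; the underflow entry holds the built-in chain's policy, so
	 * walking off the end behaves like an explicit RETURN. */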

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
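				/* A negative v encodes an absolute verdict
				 * (v = -verdict - 1, e.g. -NF_ACCEPT - 1) or
				 * IPT_RETURN; a non-negative v is the byte
				 * offset of the entry to jump to. */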
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data,
								     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
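/* The walk is iterative, not recursive: each entry's counters.pcnt field
 * temporarily holds the position we came from (a back-pointer stack, zeroed
 * again on the way out), and bit NF_IP_NUMHOOKS in comefrom marks entries
 * on the current path, so revisiting one means a loop. */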
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data,
					   m->u.match_size - sizeof(*m));
	module_put(m->u.kernel.match->me);
	return 0;
}

static inline int
standard_check(const struct ipt_entry_target *t,
	       unsigned int max_offset)
{
	struct ipt_standard_target *targ = (void *)t;

	/* Check standard info. */
	if (targ->verdict >= 0
	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
		duprintf("ipt_standard_check: bad verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	if (targ->verdict < -NF_MAX_VERDICT - 1) {
		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	return 1;
}

static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct ipt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      m->u.match_size - sizeof(*m),
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static struct ipt_target ipt_standard_target;

static inline int
check_entry(struct ipt_entry *e, const char *name, unsigned int size,
	    unsigned int *i)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;
	unsigned int j;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, size)) {
			ret = -EINVAL;
			goto cleanup_matches;
		}
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
						      t->u.target_size
						      - sizeof(*t),
						      e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data,
					    t->u.target_size - sizeof(*t));
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}

static inline struct xt_counters * alloc_counters(struct ipt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}

static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for(tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}

static short compat_calc_jump(u_int16_t offset)
{
	struct compat_delta *tmp;
	short delta;

	for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
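
/* Every entry changes size when converted between the compat (32-bit
 * userland) and native layouts.  compat_calc_jump() sums the deltas of all
 * entries lying before a given offset: that sum is exactly how far a jump
 * verdict pointing at that offset has to be shifted. */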

struct compat_ipt_standard_target
{
	struct compat_xt_entry_target target;
	compat_int_t verdict;
};

struct compat_ipt_standard
{
	struct compat_ipt_entry entry;
	struct compat_ipt_standard_target target;
};

#define IPT_ST_LEN		XT_ALIGN(sizeof(struct ipt_standard_target))
#define IPT_ST_COMPAT_LEN	COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
#define IPT_ST_OFFSET		(IPT_ST_LEN - IPT_ST_COMPAT_LEN)
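/* IPT_ST_OFFSET is the per-entry size delta between the native and compat
 * standard targets; it is non-zero when the kernel and 32-bit userland
 * disagree on alignment (e.g. a 64-bit kernel), and it contributes to the
 * deltas recorded via compat_add_offset() for jump fixups. */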

static int compat_ipt_standard_fn(void *target,
		void **dstptr, int *size, int convert)
{
	struct compat_ipt_standard_target compat_st, *pcompat_st;
	struct ipt_standard_target st, *pst;
	int ret;

	ret = 0;
	switch (convert) {
		case COMPAT_TO_USER:
			pst = target;
			memcpy(&compat_st.target, &pst->target,
				sizeof(compat_st.target));
			compat_st.verdict = pst->verdict;
			if (compat_st.verdict > 0)
				compat_st.verdict -=
					compat_calc_jump(compat_st.verdict);
			compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
			if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
				ret = -EFAULT;
			*size -= IPT_ST_OFFSET;
			*dstptr += IPT_ST_COMPAT_LEN;
			break;
		case COMPAT_FROM_USER:
			pcompat_st = target;
			memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
			st.verdict = pcompat_st->verdict;
			if (st.verdict > 0)
				st.verdict += compat_calc_jump(st.verdict);
			st.target.u.user.target_size = IPT_ST_LEN;
			memcpy(*dstptr, &st, IPT_ST_LEN);
			*size += IPT_ST_OFFSET;
			*dstptr += IPT_ST_LEN;
			break;
		case COMPAT_CALC_SIZE:
			*size += IPT_ST_OFFSET;
			break;
		default:
			ret = -ENOPROTOOPT;
			break;
	}
	return ret;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
	return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
		void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(struct xt_table_info *info,
		struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
			compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif

static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct ipt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(const char *name, unsigned int valid_hooks,
		struct xt_table_info *newinfo, unsigned int num_counters,
		void __user *counters_ptr)
{
	int ret;
	struct ipt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
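/* The increments land in just one CPU's copy of the table; that is safe
 * because get_counters() sums every CPU's copy when counters are read. */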
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};

static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	if (m->u.kernel.match->compat)
		return m->u.kernel.match->compat(m, dstptr, size,
						 COMPAT_TO_USER);
	else
		return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
}

static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target __user *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		ret = t->u.kernel.target->compat(t, dstptr, size,
						 COMPAT_TO_USER);
	else
		ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}

static inline int
compat_check_calc_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    int *size, int *i)
{
	struct ipt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);

	(*i)++;
	return 0;
}

static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	u_int16_t entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto out;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto out;
	}
	t->u.kernel.target = target;

	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
out:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	struct ipt_entry_match *dm;
	struct ipt_match *match;
	int ret;

	dm = (struct ipt_entry_match *)*dstptr;
	match = m->u.kernel.match;
	if (match->compat)
		match->compat(m, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);

	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		return ret;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
					      dm->u.match_size - sizeof(*dm),
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		return -EINVAL;
	}
	return 0;
}

static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		goto out;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	if (target->compat)
		target->compat(t, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}

	t = ipt_get_target(de);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, *size))
			goto out;
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, de, target,
				t->data, t->u.target_size - sizeof(*t),
				de->comefrom)) {
		duprintf("ip_tables: compat: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		goto out;
	}
	ret = 0;
out:
	return ret;
}

static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	i = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &i, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (i != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 i, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	return ret;
out_unlock:
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int compat_copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match, u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target, u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}

	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
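	/* "bootstrap" is an empty placeholder table_info: xt_register_table()
	 * installs it as the table's initial private data and then swaps in
	 * newinfo, so the table is never visible in a half-built state. */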
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but dont care of preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}

void ipt_unregister_table(struct ipt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
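
/* A test_type of 0xFF above acts as a wildcard: it matches any ICMP type
 * and code before the invert flag is applied.  Otherwise the type must be
 * equal and the code must fall in [min_code, max_code]. */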

static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
	   const void *info,
	   const struct xt_match *match,
	   void *matchinfo,
	   unsigned int matchsize,
	   unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compat		= &compat_ipt_standard_fn,
#endif
};

static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
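
/* Userspace iptables drives everything above through these hooks: rule
 * replacement and counter updates arrive via setsockopt() on an IPv4
 * socket, table dumps via getsockopt(); the set handlers additionally
 * require CAP_NET_ADMIN. */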

static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);