/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
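/* For illustration: FWINV() folds a user-requested inversion into the
 * test itself.  With the flag clear, FWINV(x, f) is just x; with the
 * flag set it is !x.  So if IPT_INV_SRCIP is set, a source address that
 * does NOT match smsk/src makes the comparison below evaluate false,
 * i.e. the rule matches everything outside the given range. */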
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, i.e. TCP SYN flag or ports.  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
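	/* For illustration: IP_OFFSET masks off the flag bits of
	 * frag_off, so the first fragment (offset 0, MF set) yields
	 * fragoff == 0 and is matched like a normal packet, while later
	 * fragments carry a nonzero offset and can never match port- or
	 * flag-based rules. */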
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_info_rdunlock_bh();
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
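/* For illustration (offsets hypothetical): while descending into a jump,
 * the walk below stores a back pointer in e->counters.pcnt; a rule at
 * offset 0 jumping to a user chain at offset 200 leaves pcnt of the
 * entry at 200 set to 0, so the chain's trailing RETURN can backtrack
 * to the jump site when the walk unwinds. */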
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
574 duprintf("ip check failed %p %s.\n", e, par->match->name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
			     ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
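	/* For illustration: userspace encodes standard verdicts as
	 * -verdict - 1, so a stored -2 decodes to NF_ACCEPT (1) and a
	 * stored -1 to NF_DROP (0) after the conversion above. */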
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqbegin(lock);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqretry(lock, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
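/* For illustration: a test_type of 0xFF acts as a wildcard for any ICMP
 * type/code; otherwise e.g. test_type 3 with codes 0..3 matches
 * destination-unreachable messages for net/host/protocol/port, and
 * `invert' flips the final result. */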
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);