/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

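/*
 * Round a size up to the next SMP_CACHE_BYTES boundary, e.g. 100
 * becomes 128 with 64-byte cache lines. The mask trick relies on
 * SMP_CACHE_BYTES being a power of two.
 */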
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

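/* One bookkeeping slot per NFPROTO_* family, allocated in xt_init(). */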
static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

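/*
 * Like xt_find_match(), but asks modprobe for the extension on a miss.
 * The "%st_%s" alias glues the family prefix to the extension name,
 * e.g. a missing IPv4 "limit" match requests the "ipt_limit" module.
 */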
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

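/*
 * Revision helpers: walk a family's list, remember the highest
 * revision seen in *bestp, and report whether the exact requested
 * revision exists, falling back to the NFPROTO_UNSPEC list.
 */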
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

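/* Render a hook bitmask as "PREROUTING/INPUT/..." for error messages. */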
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

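/*
 * compat_tab holds (kernel offset, cumulative delta) pairs sorted by
 * offset, so a binary search yields the total 32/64-bit layout drift
 * in effect at a given rule offset; a miss returns the delta of the
 * nearest preceding entry (or 0 before the first one).
 */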
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	unsigned int *off;

	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);

	if (off)
		return off;

	if (size < (SIZE_MAX / sizeof(unsigned int)))
		off = vmalloc(size * sizeof(unsigned int));

	return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

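/*
 * Binary search; assumes @offsets is sorted in ascending order, which
 * holds because callers record rule head offsets in traversal order.
 */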
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call is made by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

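/*
 * Rule blobs are replicated per CPU: small tables come from the slab,
 * anything larger than a page from vmalloc, mirroring the free path
 * in xt_free_table_info().
 */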
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

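/*
 * Per-CPU stacks of rule return addresses let jumps into user-defined
 * chains unwind; stacksize is scaled by xt_jumpstack_multiplier to
 * leave headroom for reentrancy (e.g. from softirq context).
 */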
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

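/*
 * Note: readers traverse table->private locklessly under xt_recseq;
 * the swap below only needs to fence this CPU, and the caller relies
 * on get_counters() to wait out readers still on the old blob before
 * freeing it.
 */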
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

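/*
 * The /proc listing walks the NFPROTO_UNSPEC list first and then the
 * family-specific one, handing over the corresponding per-family
 * mutex as the traversal crosses from one list to the other.
 */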
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose valid_hooks mask was used to set up the hooks
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

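/*
 * Creates the per-family /proc/net entries, e.g. "ip_tables_names",
 * "ip_tables_matches" and "ip_tables_targets" for NFPROTO_IPV4.
 */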
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);