/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>

#include <asm/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");
/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/

#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)					\
do {								\
	if (!(x))						\
		printk("ARP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define ARP_NF_ASSERT(x)
#endif
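
/* Helper for the device-address checks below: any byte of hdr_addr that
 * differs from ap->addr in a position where ap->mask has bits set makes
 * the result non-zero, i.e. non-zero means "does not match".
 */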
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return (ret != 0);
}
/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care, unrolling the loop is a win on them.
 * For other arches, we only have a 16-bit alignment.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}
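
/* Note on FWINV() below: each comparison result is XORed with the state
 * of the corresponding ARPT_INV_* flag, so setting the inversion flag in
 * a rule flips the sense of just that one check.
 */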
/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");
		return 0;
	}

	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches. */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	return 1;
}
#undef FWINV
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		printk("arp_tables: error: '%s'\n",
		       (const char *)par->targinfo);

	return NF_DROP;
}
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
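
/* arpt_do_table() is the per-packet entry point: starting at the hook's
 * entry rule it walks the table, following standard-target jumps and
 * unwinding RETURNs through the back pointers stashed in each entry's
 * comefrom field, until an absolute verdict is reached.
 */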
unsigned int arpt_do_table(struct sk_buff *skb,
			   unsigned int hook,
			   const struct net_device *in,
			   const struct net_device *out,
			   struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	bool hotdrop = false;
	struct arpt_entry *e, *back;
	const char *indev, *outdev;
	void *table_base;
	const struct xt_table_info *private;
	struct xt_target_param tgpar;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;

	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);
	back = get_entry(table_base, private->underflow[hook]);

	tgpar.in      = in;
	tgpar.out     = out;
	tgpar.hooknum = hook;
	tgpar.family  = NFPROTO_ARP;

	arp = arp_hdr(skb);
	do {
		if (arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			struct arpt_entry_target *t;
			int hdr_len;

			hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
				(2 * skb->dev->addr_len);

			ADD_COUNTER(e->counters, hdr_len, 1);

			t = arpt_get_target(e);

			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct arpt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != ARPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v
				    != (void *)e + e->next_offset) {
					/* Save old back ptr in next entry */
					struct arpt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom =
						(void *)back - table_base;

					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				 * abs. verdicts
				 */
				tgpar.target   = t->u.kernel.target;
				tgpar.targinfo = t->data;
				verdict = t->u.kernel.target->target(skb,
								     &tgpar);

				/* Target might have changed stuff. */
				arp = arp_hdr(skb);

				if (verdict == ARPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
	xt_info_rdunlock_bh();

	if (hotdrop)
		return NF_DROP;
	else
		return verdict;
}
/* All zeroes == unconditional rule. */
static inline int unconditional(const struct arpt_arp *arp)
{
	unsigned int i;

	for (i = 0; i < sizeof(*arp)/sizeof(__u32); i++)
		if (((__u32 *)arp)[i])
			return 0;

	return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 */
static int mark_source_chains(struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct arpt_standard_target *t
				= (void *)arpt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				printk("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct arpt_entry)
			    && (strcmp(t->target.u.user.name,
				       ARPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->arp)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    ARPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   ARPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}

					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
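
/* Per-rule sanity check used at table load time: the generic ARP match
 * must be acceptable and the declared target must fit between
 * target_offset and next_offset.
 */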
static inline int check_entry(struct arpt_entry *e, const char *name)
{
	const struct arpt_entry_target *t;

	if (!arp_checkentry(&e->arp)) {
		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
		return -EINVAL;

	t = arpt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct arpt_entry_target *t = arpt_get_target(e);
	int ret;
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_ARP,
	};

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		duprintf("arp_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}
static inline int
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	int ret;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	t = arpt_get_target(e);
	target = try_then_request_module(xt_find_target(NFPROTO_ARP,
							t->u.user.name,
							t->u.user.revision),
					 "arpt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     unsigned char *base,
					     unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0
	    || (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct arpt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not ARPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
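
/* Counterpart of find_check_entry(): run the target's destructor (if it
 * has one) and drop the module reference taken when the target was
 * looked up.
 */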
static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct arpt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	t = arpt_get_target(e);
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(const char *name,
			   unsigned int valid_hooks,
			   struct xt_table_info *newinfo,
			   void *entry0,
			   unsigned int size,
			   unsigned int number,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
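
/* Counter snapshot helpers: set_entry_to_counter() seeds the output from
 * the current CPU's copy of the table, add_entry_to_counter() then folds
 * in the counts from every other CPU.
 */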
static inline int add_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int set_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	ARPT_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		ARPT_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
static struct xt_counters *alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
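
/* Copy the kernel's rule blob back to userspace, then patch each entry's
 * counters (from the snapshot above) and restore the target name, which
 * in the kernel copy shares a union with the in-kernel target pointer.
 */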
static int copy_entries_to_user(unsigned int total_size,
				struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		struct arpt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct arpt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
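
/* COMPAT translation of standard verdicts: positive verdicts are jump
 * offsets, and entry sizes differ between the 32-bit and native layouts,
 * so jump targets have to be adjusted via xt_compat_calc_jump().
 */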
static int compat_calc_entry(struct arpt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct arpt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				  compat_calc_entry, info, loc_cpu_entry,
				  newinfo);
}
#endif
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[ARPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
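
/* ARPT_SO_SET_REPLACE: copy the replacement table from userspace,
 * validate and translate it, then swap it in via __do_replace() above.
 */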
static int do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static int
add_counter_to_entry(struct arpt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
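
/* ARPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet counts onto the
 * live table's entries; the compat variant only differs in the header
 * struct it parses.
 */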
static int do_add_counters(struct net *net, void __user *user, unsigned int len,
			   int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	xt_info_wrlock(curcpu);
	ARPT_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc,
			   &i);
	xt_info_wrunlock(curcpu);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
static int
compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
{
	struct arpt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = try_then_request_module(xt_find_target(NFPROTO_ARP,
							t->u.user.name,
							t->u.user.revision),
					 "arpt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}
static int
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	struct arpt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct arpt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
*e
, const char *name
,
1334 ret
= check_target(e
, name
);
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		j -= i;
		COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	goto out;
}
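
/* 32-bit userspace view of struct arpt_replace, as consumed by the
 * compat setsockopt path below.
 */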
struct compat_arpt_replace {
	char				name[ARPT_TABLE_MAXNAMELEN];
	u32				valid_hooks;
	u32				num_entries;
	u32				size;
	u32				hook_entry[NF_ARP_NUMHOOKS];
	u32				underflow[NF_ARP_NUMHOOKS];
	u32				num_counters;
	compat_uptr_t			counters;
	struct compat_arpt_entry	entries[0];
};
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
				  unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int *i)
{
	struct arpt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_arpt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy on our node/cpu */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);
	vfree(counters);
	return ret;
}
struct compat_arpt_get_entries {
	char name[ARPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_arpt_entry entrytable[0];
};
static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(NFPROTO_ARP);
	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(NFPROTO_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}
static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);

static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case ARPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_arpt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
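
/* Registration API for table providers (e.g. the arptable_filter
 * module): translate the initial ruleset and hand it to the xt core.
 */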
struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
				     const struct arpt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);

	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
void arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
			   cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_standard_target __read_mostly = {
	.name		= ARPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_ARP,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target arpt_error_target __read_mostly = {
	.name		= ARPT_ERROR_TARGET,
	.target		= arpt_error,
	.targetsize	= ARPT_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_ARP,
};
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&arpt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&arpt_error_target);
	if (ret < 0)
		goto err3;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n");
	return 0;

err4:
	xt_unregister_target(&arpt_error_target);
err3:
	xt_unregister_target(&arpt_standard_target);
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}
static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_target(&arpt_error_target);
	xt_unregister_target(&arpt_standard_target);
	unregister_pernet_subsys(&arp_tables_net_ops);
}
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);