/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 */
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/netdevice.h>
15 #include <linux/capability.h>
16 #include <linux/if_arp.h>
17 #include <linux/kmod.h>
18 #include <linux/vmalloc.h>
19 #include <linux/proc_fs.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/mutex.h>
23 #include <linux/err.h>
24 #include <net/compat.h>
26 #include <asm/uaccess.h>
28 #include <linux/netfilter/x_tables.h>
29 #include <linux/netfilter_arp/arp_tables.h>
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
33 MODULE_DESCRIPTION("arptables core");
/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/

/* Packet-path debug printk; compiled out unless DEBUG_ARP_TABLES is set. */
#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* Userspace-interface debug printk; compiled out unless DEBUG_ARP_TABLES_USER. */
#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Soft assertion: log (but do not crash) when the invariant fails. */
#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)					\
do {								\
	if (!(x))						\
		printk("ARP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define ARP_NF_ASSERT(x)
#endif
61 static inline int arp_devaddr_compare(const struct arpt_devaddr_info
*ap
,
62 const char *hdr_addr
, int len
)
66 if (len
> ARPT_DEV_ADDR_LEN_MAX
)
67 len
= ARPT_DEV_ADDR_LEN_MAX
;
70 for (i
= 0; i
< len
; i
++)
71 ret
|= (hdr_addr
[i
] ^ ap
->addr
[i
]) & ap
->mask
[i
];
76 /* Returns whether packet matches rule or not. */
77 static inline int arp_packet_match(const struct arphdr
*arphdr
,
78 struct net_device
*dev
,
81 const struct arpt_arp
*arpinfo
)
83 const char *arpptr
= (char *)(arphdr
+ 1);
84 const char *src_devaddr
, *tgt_devaddr
;
85 __be32 src_ipaddr
, tgt_ipaddr
;
88 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
90 if (FWINV((arphdr
->ar_op
& arpinfo
->arpop_mask
) != arpinfo
->arpop
,
92 dprintf("ARP operation field mismatch.\n");
93 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
94 arphdr
->ar_op
, arpinfo
->arpop
, arpinfo
->arpop_mask
);
98 if (FWINV((arphdr
->ar_hrd
& arpinfo
->arhrd_mask
) != arpinfo
->arhrd
,
100 dprintf("ARP hardware address format mismatch.\n");
101 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
102 arphdr
->ar_hrd
, arpinfo
->arhrd
, arpinfo
->arhrd_mask
);
106 if (FWINV((arphdr
->ar_pro
& arpinfo
->arpro_mask
) != arpinfo
->arpro
,
108 dprintf("ARP protocol address format mismatch.\n");
109 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
110 arphdr
->ar_pro
, arpinfo
->arpro
, arpinfo
->arpro_mask
);
114 if (FWINV((arphdr
->ar_hln
& arpinfo
->arhln_mask
) != arpinfo
->arhln
,
116 dprintf("ARP hardware address length mismatch.\n");
117 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
118 arphdr
->ar_hln
, arpinfo
->arhln
, arpinfo
->arhln_mask
);
122 src_devaddr
= arpptr
;
123 arpptr
+= dev
->addr_len
;
124 memcpy(&src_ipaddr
, arpptr
, sizeof(u32
));
125 arpptr
+= sizeof(u32
);
126 tgt_devaddr
= arpptr
;
127 arpptr
+= dev
->addr_len
;
128 memcpy(&tgt_ipaddr
, arpptr
, sizeof(u32
));
130 if (FWINV(arp_devaddr_compare(&arpinfo
->src_devaddr
, src_devaddr
, dev
->addr_len
),
131 ARPT_INV_SRCDEVADDR
) ||
132 FWINV(arp_devaddr_compare(&arpinfo
->tgt_devaddr
, tgt_devaddr
, dev
->addr_len
),
133 ARPT_INV_TGTDEVADDR
)) {
134 dprintf("Source or target device address mismatch.\n");
139 if (FWINV((src_ipaddr
& arpinfo
->smsk
.s_addr
) != arpinfo
->src
.s_addr
,
141 FWINV(((tgt_ipaddr
& arpinfo
->tmsk
.s_addr
) != arpinfo
->tgt
.s_addr
),
143 dprintf("Source or target IP address mismatch.\n");
145 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
147 NIPQUAD(arpinfo
->smsk
.s_addr
),
148 NIPQUAD(arpinfo
->src
.s_addr
),
149 arpinfo
->invflags
& ARPT_INV_SRCIP
? " (INV)" : "");
150 dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
152 NIPQUAD(arpinfo
->tmsk
.s_addr
),
153 NIPQUAD(arpinfo
->tgt
.s_addr
),
154 arpinfo
->invflags
& ARPT_INV_TGTIP
? " (INV)" : "");
158 /* Look for ifname matches. */
159 for (i
= 0, ret
= 0; i
< IFNAMSIZ
; i
++) {
160 ret
|= (indev
[i
] ^ arpinfo
->iniface
[i
])
161 & arpinfo
->iniface_mask
[i
];
164 if (FWINV(ret
!= 0, ARPT_INV_VIA_IN
)) {
165 dprintf("VIA in mismatch (%s vs %s).%s\n",
166 indev
, arpinfo
->iniface
,
167 arpinfo
->invflags
&ARPT_INV_VIA_IN
?" (INV)":"");
171 for (i
= 0, ret
= 0; i
< IFNAMSIZ
; i
++) {
172 ret
|= (outdev
[i
] ^ arpinfo
->outiface
[i
])
173 & arpinfo
->outiface_mask
[i
];
176 if (FWINV(ret
!= 0, ARPT_INV_VIA_OUT
)) {
177 dprintf("VIA out mismatch (%s vs %s).%s\n",
178 outdev
, arpinfo
->outiface
,
179 arpinfo
->invflags
&ARPT_INV_VIA_OUT
?" (INV)":"");
187 static inline int arp_checkentry(const struct arpt_arp
*arp
)
189 if (arp
->flags
& ~ARPT_F_MASK
) {
190 duprintf("Unknown flag bits set: %08X\n",
191 arp
->flags
& ~ARPT_F_MASK
);
194 if (arp
->invflags
& ~ARPT_INV_MASK
) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 arp
->invflags
& ~ARPT_INV_MASK
);
204 arpt_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
207 printk("arp_tables: error: '%s'\n",
208 (const char *)par
->targinfo
);
/* Resolve a byte offset within a table blob to its rule entry. */
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}
218 unsigned int arpt_do_table(struct sk_buff
*skb
,
220 const struct net_device
*in
,
221 const struct net_device
*out
,
222 struct xt_table
*table
)
224 static const char nulldevname
[IFNAMSIZ
];
225 unsigned int verdict
= NF_DROP
;
226 const struct arphdr
*arp
;
227 bool hotdrop
= false;
228 struct arpt_entry
*e
, *back
;
229 const char *indev
, *outdev
;
231 const struct xt_table_info
*private;
232 struct xt_target_param tgpar
;
234 if (!pskb_may_pull(skb
, arp_hdr_len(skb
->dev
)))
237 indev
= in
? in
->name
: nulldevname
;
238 outdev
= out
? out
->name
: nulldevname
;
240 read_lock_bh(&table
->lock
);
241 private = table
->private;
242 table_base
= (void *)private->entries
[smp_processor_id()];
243 e
= get_entry(table_base
, private->hook_entry
[hook
]);
244 back
= get_entry(table_base
, private->underflow
[hook
]);
248 tgpar
.hooknum
= hook
;
249 tgpar
.family
= NFPROTO_ARP
;
253 if (arp_packet_match(arp
, skb
->dev
, indev
, outdev
, &e
->arp
)) {
254 struct arpt_entry_target
*t
;
257 hdr_len
= sizeof(*arp
) + (2 * sizeof(struct in_addr
)) +
258 (2 * skb
->dev
->addr_len
);
259 ADD_COUNTER(e
->counters
, hdr_len
, 1);
261 t
= arpt_get_target(e
);
263 /* Standard target? */
264 if (!t
->u
.kernel
.target
->target
) {
267 v
= ((struct arpt_standard_target
*)t
)->verdict
;
269 /* Pop from stack? */
270 if (v
!= ARPT_RETURN
) {
271 verdict
= (unsigned)(-v
) - 1;
275 back
= get_entry(table_base
,
280 != (void *)e
+ e
->next_offset
) {
281 /* Save old back ptr in next entry */
282 struct arpt_entry
*next
283 = (void *)e
+ e
->next_offset
;
285 (void *)back
- table_base
;
287 /* set back pointer to next entry */
291 e
= get_entry(table_base
, v
);
293 /* Targets which reenter must return
296 tgpar
.target
= t
->u
.kernel
.target
;
297 tgpar
.targinfo
= t
->data
;
298 verdict
= t
->u
.kernel
.target
->target(skb
,
301 /* Target might have changed stuff. */
304 if (verdict
== ARPT_CONTINUE
)
305 e
= (void *)e
+ e
->next_offset
;
311 e
= (void *)e
+ e
->next_offset
;
314 read_unlock_bh(&table
->lock
);
322 /* All zeroes == unconditional rule. */
323 static inline int unconditional(const struct arpt_arp
*arp
)
327 for (i
= 0; i
< sizeof(*arp
)/sizeof(__u32
); i
++)
328 if (((__u32
*)arp
)[i
])
334 /* Figures out from what hook each rule can be called: returns 0 if
335 * there are loops. Puts hook bitmask in comefrom.
337 static int mark_source_chains(struct xt_table_info
*newinfo
,
338 unsigned int valid_hooks
, void *entry0
)
342 /* No recursion; use packet counter to save back ptrs (reset
343 * to 0 as we leave), and comefrom to save source hook bitmask.
345 for (hook
= 0; hook
< NF_ARP_NUMHOOKS
; hook
++) {
346 unsigned int pos
= newinfo
->hook_entry
[hook
];
348 = (struct arpt_entry
*)(entry0
+ pos
);
350 if (!(valid_hooks
& (1 << hook
)))
353 /* Set initial back pointer. */
354 e
->counters
.pcnt
= pos
;
357 const struct arpt_standard_target
*t
358 = (void *)arpt_get_target(e
);
359 int visited
= e
->comefrom
& (1 << hook
);
361 if (e
->comefrom
& (1 << NF_ARP_NUMHOOKS
)) {
362 printk("arptables: loop hook %u pos %u %08X.\n",
363 hook
, pos
, e
->comefrom
);
367 |= ((1 << hook
) | (1 << NF_ARP_NUMHOOKS
));
369 /* Unconditional return/END. */
370 if ((e
->target_offset
== sizeof(struct arpt_entry
)
371 && (strcmp(t
->target
.u
.user
.name
,
372 ARPT_STANDARD_TARGET
) == 0)
374 && unconditional(&e
->arp
)) || visited
) {
375 unsigned int oldpos
, size
;
377 if (t
->verdict
< -NF_MAX_VERDICT
- 1) {
378 duprintf("mark_source_chains: bad "
379 "negative verdict (%i)\n",
384 /* Return: backtrack through the last
388 e
->comefrom
^= (1<<NF_ARP_NUMHOOKS
);
390 pos
= e
->counters
.pcnt
;
391 e
->counters
.pcnt
= 0;
393 /* We're at the start. */
397 e
= (struct arpt_entry
*)
399 } while (oldpos
== pos
+ e
->next_offset
);
402 size
= e
->next_offset
;
403 e
= (struct arpt_entry
*)
404 (entry0
+ pos
+ size
);
405 e
->counters
.pcnt
= pos
;
408 int newpos
= t
->verdict
;
410 if (strcmp(t
->target
.u
.user
.name
,
411 ARPT_STANDARD_TARGET
) == 0
413 if (newpos
> newinfo
->size
-
414 sizeof(struct arpt_entry
)) {
415 duprintf("mark_source_chains: "
416 "bad verdict (%i)\n",
421 /* This a jump; chase it. */
422 duprintf("Jump rule %u -> %u\n",
425 /* ... this is a fallthru */
426 newpos
= pos
+ e
->next_offset
;
428 e
= (struct arpt_entry
*)
430 e
->counters
.pcnt
= pos
;
435 duprintf("Finished chain %u\n", hook
);
440 static inline int check_entry(struct arpt_entry
*e
, const char *name
)
442 const struct arpt_entry_target
*t
;
444 if (!arp_checkentry(&e
->arp
)) {
445 duprintf("arp_tables: arp check failed %p %s.\n", e
, name
);
449 if (e
->target_offset
+ sizeof(struct arpt_entry_target
) > e
->next_offset
)
452 t
= arpt_get_target(e
);
453 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
459 static inline int check_target(struct arpt_entry
*e
, const char *name
)
461 struct arpt_entry_target
*t
= arpt_get_target(e
);
463 struct xt_tgchk_param par
= {
466 .target
= t
->u
.kernel
.target
,
468 .hook_mask
= e
->comefrom
,
469 .family
= NFPROTO_ARP
,
472 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
), 0, false);
474 duprintf("arp_tables: check failed for `%s'.\n",
475 t
->u
.kernel
.target
->name
);
482 find_check_entry(struct arpt_entry
*e
, const char *name
, unsigned int size
,
485 struct arpt_entry_target
*t
;
486 struct xt_target
*target
;
489 ret
= check_entry(e
, name
);
493 t
= arpt_get_target(e
);
494 target
= try_then_request_module(xt_find_target(NFPROTO_ARP
,
497 "arpt_%s", t
->u
.user
.name
);
498 if (IS_ERR(target
) || !target
) {
499 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
500 ret
= target
? PTR_ERR(target
) : -ENOENT
;
503 t
->u
.kernel
.target
= target
;
505 ret
= check_target(e
, name
);
512 module_put(t
->u
.kernel
.target
->me
);
517 static inline int check_entry_size_and_hooks(struct arpt_entry
*e
,
518 struct xt_table_info
*newinfo
,
520 unsigned char *limit
,
521 const unsigned int *hook_entries
,
522 const unsigned int *underflows
,
527 if ((unsigned long)e
% __alignof__(struct arpt_entry
) != 0
528 || (unsigned char *)e
+ sizeof(struct arpt_entry
) >= limit
) {
529 duprintf("Bad offset %p\n", e
);
534 < sizeof(struct arpt_entry
) + sizeof(struct arpt_entry_target
)) {
535 duprintf("checking: element %p size %u\n",
540 /* Check hooks & underflows */
541 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
542 if ((unsigned char *)e
- base
== hook_entries
[h
])
543 newinfo
->hook_entry
[h
] = hook_entries
[h
];
544 if ((unsigned char *)e
- base
== underflows
[h
])
545 newinfo
->underflow
[h
] = underflows
[h
];
548 /* FIXME: underflows must be unconditional, standard verdicts
549 < 0 (not ARPT_RETURN). --RR */
551 /* Clear counters and comefrom */
552 e
->counters
= ((struct xt_counters
) { 0, 0 });
559 static inline int cleanup_entry(struct arpt_entry
*e
, unsigned int *i
)
561 struct xt_tgdtor_param par
;
562 struct arpt_entry_target
*t
;
564 if (i
&& (*i
)-- == 0)
567 t
= arpt_get_target(e
);
568 par
.target
= t
->u
.kernel
.target
;
569 par
.targinfo
= t
->data
;
570 par
.family
= NFPROTO_ARP
;
571 if (par
.target
->destroy
!= NULL
)
572 par
.target
->destroy(&par
);
573 module_put(par
.target
->me
);
577 /* Checks and translates the user-supplied table segment (held in
580 static int translate_table(const char *name
,
581 unsigned int valid_hooks
,
582 struct xt_table_info
*newinfo
,
586 const unsigned int *hook_entries
,
587 const unsigned int *underflows
)
592 newinfo
->size
= size
;
593 newinfo
->number
= number
;
595 /* Init all hooks to impossible value. */
596 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
597 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
598 newinfo
->underflow
[i
] = 0xFFFFFFFF;
601 duprintf("translate_table: size %u\n", newinfo
->size
);
604 /* Walk through entries, checking offsets. */
605 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
606 check_entry_size_and_hooks
,
610 hook_entries
, underflows
, &i
);
611 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret
);
616 duprintf("translate_table: %u not %u entries\n",
621 /* Check hooks all assigned */
622 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
623 /* Only hooks which are valid */
624 if (!(valid_hooks
& (1 << i
)))
626 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
627 duprintf("Invalid hook entry %u %u\n",
631 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
632 duprintf("Invalid underflow %u %u\n",
638 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
)) {
639 duprintf("Looping hook\n");
643 /* Finally, each sanity check must pass */
645 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
646 find_check_entry
, name
, size
, &i
);
649 ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
654 /* And one copy for every other CPU */
655 for_each_possible_cpu(i
) {
656 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
657 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
664 static inline int add_entry_to_counter(const struct arpt_entry
*e
,
665 struct xt_counters total
[],
668 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
674 static inline int set_entry_to_counter(const struct arpt_entry
*e
,
675 struct xt_counters total
[],
678 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
684 static void get_counters(const struct xt_table_info
*t
,
685 struct xt_counters counters
[])
691 /* Instead of clearing (by a previous call to memset())
692 * the counters and using adds, we set the counters
693 * with data used by 'current' CPU
694 * We dont care about preemption here.
696 curcpu
= raw_smp_processor_id();
699 ARPT_ENTRY_ITERATE(t
->entries
[curcpu
],
701 set_entry_to_counter
,
705 for_each_possible_cpu(cpu
) {
709 ARPT_ENTRY_ITERATE(t
->entries
[cpu
],
711 add_entry_to_counter
,
717 static inline struct xt_counters
*alloc_counters(struct xt_table
*table
)
719 unsigned int countersize
;
720 struct xt_counters
*counters
;
721 const struct xt_table_info
*private = table
->private;
723 /* We need atomic snapshot of counters: rest doesn't change
724 * (other than comefrom, which userspace doesn't care
727 countersize
= sizeof(struct xt_counters
) * private->number
;
728 counters
= vmalloc_node(countersize
, numa_node_id());
730 if (counters
== NULL
)
731 return ERR_PTR(-ENOMEM
);
733 /* First, sum counters... */
734 write_lock_bh(&table
->lock
);
735 get_counters(private, counters
);
736 write_unlock_bh(&table
->lock
);
741 static int copy_entries_to_user(unsigned int total_size
,
742 struct xt_table
*table
,
743 void __user
*userptr
)
745 unsigned int off
, num
;
746 struct arpt_entry
*e
;
747 struct xt_counters
*counters
;
748 struct xt_table_info
*private = table
->private;
752 counters
= alloc_counters(table
);
753 if (IS_ERR(counters
))
754 return PTR_ERR(counters
);
756 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
757 /* ... then copy entire thing ... */
758 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
763 /* FIXME: use iterator macros --RR */
764 /* ... then go back and fix counters and names */
765 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
766 struct arpt_entry_target
*t
;
768 e
= (struct arpt_entry
*)(loc_cpu_entry
+ off
);
769 if (copy_to_user(userptr
+ off
770 + offsetof(struct arpt_entry
, counters
),
772 sizeof(counters
[num
])) != 0) {
777 t
= arpt_get_target(e
);
778 if (copy_to_user(userptr
+ off
+ e
->target_offset
779 + offsetof(struct arpt_entry_target
,
781 t
->u
.kernel
.target
->name
,
782 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
794 static void compat_standard_from_user(void *dst
, void *src
)
796 int v
= *(compat_int_t
*)src
;
799 v
+= xt_compat_calc_jump(NFPROTO_ARP
, v
);
800 memcpy(dst
, &v
, sizeof(v
));
803 static int compat_standard_to_user(void __user
*dst
, void *src
)
805 compat_int_t cv
= *(int *)src
;
808 cv
-= xt_compat_calc_jump(NFPROTO_ARP
, cv
);
809 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
812 static int compat_calc_entry(struct arpt_entry
*e
,
813 const struct xt_table_info
*info
,
814 void *base
, struct xt_table_info
*newinfo
)
816 struct arpt_entry_target
*t
;
817 unsigned int entry_offset
;
820 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
821 entry_offset
= (void *)e
- base
;
823 t
= arpt_get_target(e
);
824 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
825 newinfo
->size
-= off
;
826 ret
= xt_compat_add_offset(NFPROTO_ARP
, entry_offset
, off
);
830 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
831 if (info
->hook_entry
[i
] &&
832 (e
< (struct arpt_entry
*)(base
+ info
->hook_entry
[i
])))
833 newinfo
->hook_entry
[i
] -= off
;
834 if (info
->underflow
[i
] &&
835 (e
< (struct arpt_entry
*)(base
+ info
->underflow
[i
])))
836 newinfo
->underflow
[i
] -= off
;
841 static int compat_table_info(const struct xt_table_info
*info
,
842 struct xt_table_info
*newinfo
)
846 if (!newinfo
|| !info
)
849 /* we dont care about newinfo->entries[] */
850 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
851 newinfo
->initial_entries
= 0;
852 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
853 return ARPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
854 compat_calc_entry
, info
, loc_cpu_entry
,
859 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
861 char name
[ARPT_TABLE_MAXNAMELEN
];
865 if (*len
!= sizeof(struct arpt_getinfo
)) {
866 duprintf("length %u != %Zu\n", *len
,
867 sizeof(struct arpt_getinfo
));
871 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
874 name
[ARPT_TABLE_MAXNAMELEN
-1] = '\0';
877 xt_compat_lock(NFPROTO_ARP
);
879 t
= try_then_request_module(xt_find_table_lock(net
, NFPROTO_ARP
, name
),
880 "arptable_%s", name
);
881 if (t
&& !IS_ERR(t
)) {
882 struct arpt_getinfo info
;
883 const struct xt_table_info
*private = t
->private;
887 struct xt_table_info tmp
;
888 ret
= compat_table_info(private, &tmp
);
889 xt_compat_flush_offsets(NFPROTO_ARP
);
893 info
.valid_hooks
= t
->valid_hooks
;
894 memcpy(info
.hook_entry
, private->hook_entry
,
895 sizeof(info
.hook_entry
));
896 memcpy(info
.underflow
, private->underflow
,
897 sizeof(info
.underflow
));
898 info
.num_entries
= private->number
;
899 info
.size
= private->size
;
900 strcpy(info
.name
, name
);
902 if (copy_to_user(user
, &info
, *len
) != 0)
909 ret
= t
? PTR_ERR(t
) : -ENOENT
;
912 xt_compat_unlock(NFPROTO_ARP
);
917 static int get_entries(struct net
*net
, struct arpt_get_entries __user
*uptr
,
921 struct arpt_get_entries get
;
924 if (*len
< sizeof(get
)) {
925 duprintf("get_entries: %u < %Zu\n", *len
, sizeof(get
));
928 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
930 if (*len
!= sizeof(struct arpt_get_entries
) + get
.size
) {
931 duprintf("get_entries: %u != %Zu\n", *len
,
932 sizeof(struct arpt_get_entries
) + get
.size
);
936 t
= xt_find_table_lock(net
, NFPROTO_ARP
, get
.name
);
937 if (t
&& !IS_ERR(t
)) {
938 const struct xt_table_info
*private = t
->private;
940 duprintf("t->private->number = %u\n",
942 if (get
.size
== private->size
)
943 ret
= copy_entries_to_user(private->size
,
944 t
, uptr
->entrytable
);
946 duprintf("get_entries: I've got %u not %u!\n",
947 private->size
, get
.size
);
953 ret
= t
? PTR_ERR(t
) : -ENOENT
;
958 static int __do_replace(struct net
*net
, const char *name
,
959 unsigned int valid_hooks
,
960 struct xt_table_info
*newinfo
,
961 unsigned int num_counters
,
962 void __user
*counters_ptr
)
966 struct xt_table_info
*oldinfo
;
967 struct xt_counters
*counters
;
968 void *loc_cpu_old_entry
;
971 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
978 t
= try_then_request_module(xt_find_table_lock(net
, NFPROTO_ARP
, name
),
979 "arptable_%s", name
);
980 if (!t
|| IS_ERR(t
)) {
981 ret
= t
? PTR_ERR(t
) : -ENOENT
;
982 goto free_newinfo_counters_untrans
;
986 if (valid_hooks
!= t
->valid_hooks
) {
987 duprintf("Valid hook crap: %08X vs %08X\n",
988 valid_hooks
, t
->valid_hooks
);
993 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
997 /* Update module usage count based on number of rules */
998 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
999 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1000 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1001 (newinfo
->number
<= oldinfo
->initial_entries
))
1003 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1004 (newinfo
->number
<= oldinfo
->initial_entries
))
1007 /* Get the old counters. */
1008 get_counters(oldinfo
, counters
);
1009 /* Decrease module usage counts and free resource */
1010 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1011 ARPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1014 xt_free_table_info(oldinfo
);
1015 if (copy_to_user(counters_ptr
, counters
,
1016 sizeof(struct xt_counters
) * num_counters
) != 0)
1025 free_newinfo_counters_untrans
:
1031 static int do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1034 struct arpt_replace tmp
;
1035 struct xt_table_info
*newinfo
;
1036 void *loc_cpu_entry
;
1038 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1041 /* overflow check */
1042 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1045 newinfo
= xt_alloc_table_info(tmp
.size
);
1049 /* choose the copy that is on our node/cpu */
1050 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1051 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1057 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1058 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1059 tmp
.hook_entry
, tmp
.underflow
);
1063 duprintf("arp_tables: Translated table\n");
1065 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1066 tmp
.num_counters
, tmp
.counters
);
1068 goto free_newinfo_untrans
;
1071 free_newinfo_untrans
:
1072 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1074 xt_free_table_info(newinfo
);
1078 /* We're lazy, and add to the first CPU; overflow works its fey magic
1079 * and everything is OK.
1081 static inline int add_counter_to_entry(struct arpt_entry
*e
,
1082 const struct xt_counters addme
[],
1086 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1092 static int do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
,
1096 struct xt_counters_info tmp
;
1097 struct xt_counters
*paddc
;
1098 unsigned int num_counters
;
1103 const struct xt_table_info
*private;
1105 void *loc_cpu_entry
;
1106 #ifdef CONFIG_COMPAT
1107 struct compat_xt_counters_info compat_tmp
;
1111 size
= sizeof(struct compat_xt_counters_info
);
1116 size
= sizeof(struct xt_counters_info
);
1119 if (copy_from_user(ptmp
, user
, size
) != 0)
1122 #ifdef CONFIG_COMPAT
1124 num_counters
= compat_tmp
.num_counters
;
1125 name
= compat_tmp
.name
;
1129 num_counters
= tmp
.num_counters
;
1133 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1136 paddc
= vmalloc_node(len
- size
, numa_node_id());
1140 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1145 t
= xt_find_table_lock(net
, NFPROTO_ARP
, name
);
1146 if (!t
|| IS_ERR(t
)) {
1147 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1151 write_lock_bh(&t
->lock
);
1152 private = t
->private;
1153 if (private->number
!= num_counters
) {
1155 goto unlock_up_free
;
1159 /* Choose the copy that is on our node */
1160 loc_cpu_entry
= private->entries
[smp_processor_id()];
1161 ARPT_ENTRY_ITERATE(loc_cpu_entry
,
1163 add_counter_to_entry
,
1167 write_unlock_bh(&t
->lock
);
1176 #ifdef CONFIG_COMPAT
1178 compat_release_entry(struct compat_arpt_entry
*e
, unsigned int *i
)
1180 struct arpt_entry_target
*t
;
1182 if (i
&& (*i
)-- == 0)
1185 t
= compat_arpt_get_target(e
);
1186 module_put(t
->u
.kernel
.target
->me
);
1191 check_compat_entry_size_and_hooks(struct compat_arpt_entry
*e
,
1192 struct xt_table_info
*newinfo
,
1194 unsigned char *base
,
1195 unsigned char *limit
,
1196 unsigned int *hook_entries
,
1197 unsigned int *underflows
,
1201 struct arpt_entry_target
*t
;
1202 struct xt_target
*target
;
1203 unsigned int entry_offset
;
1206 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1207 if ((unsigned long)e
% __alignof__(struct compat_arpt_entry
) != 0
1208 || (unsigned char *)e
+ sizeof(struct compat_arpt_entry
) >= limit
) {
1209 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1213 if (e
->next_offset
< sizeof(struct compat_arpt_entry
) +
1214 sizeof(struct compat_xt_entry_target
)) {
1215 duprintf("checking: element %p size %u\n",
1220 /* For purposes of check_entry casting the compat entry is fine */
1221 ret
= check_entry((struct arpt_entry
*)e
, name
);
1225 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1226 entry_offset
= (void *)e
- (void *)base
;
1228 t
= compat_arpt_get_target(e
);
1229 target
= try_then_request_module(xt_find_target(NFPROTO_ARP
,
1231 t
->u
.user
.revision
),
1232 "arpt_%s", t
->u
.user
.name
);
1233 if (IS_ERR(target
) || !target
) {
1234 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1236 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1239 t
->u
.kernel
.target
= target
;
1241 off
+= xt_compat_target_offset(target
);
1243 ret
= xt_compat_add_offset(NFPROTO_ARP
, entry_offset
, off
);
1245 goto release_target
;
1247 /* Check hooks & underflows */
1248 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1249 if ((unsigned char *)e
- base
== hook_entries
[h
])
1250 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1251 if ((unsigned char *)e
- base
== underflows
[h
])
1252 newinfo
->underflow
[h
] = underflows
[h
];
1255 /* Clear counters and comefrom */
1256 memset(&e
->counters
, 0, sizeof(e
->counters
));
1263 module_put(t
->u
.kernel
.target
->me
);
1269 compat_copy_entry_from_user(struct compat_arpt_entry
*e
, void **dstptr
,
1270 unsigned int *size
, const char *name
,
1271 struct xt_table_info
*newinfo
, unsigned char *base
)
1273 struct arpt_entry_target
*t
;
1274 struct xt_target
*target
;
1275 struct arpt_entry
*de
;
1276 unsigned int origsize
;
1281 de
= (struct arpt_entry
*)*dstptr
;
1282 memcpy(de
, e
, sizeof(struct arpt_entry
));
1283 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1285 *dstptr
+= sizeof(struct arpt_entry
);
1286 *size
+= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1288 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1289 t
= compat_arpt_get_target(e
);
1290 target
= t
->u
.kernel
.target
;
1291 xt_compat_target_from_user(t
, dstptr
, size
);
1293 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1294 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1295 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1296 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1297 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1298 newinfo
->underflow
[h
] -= origsize
- *size
;
/* Second-pass check of a translated compat entry: run the target's
 * own checkentry hook and count the entry on success.
 */
static inline int compat_check_entry(struct arpt_entry *e, const char *name,
				     unsigned int *i)
{
	int ret;

	ret = check_target(e, name);
	if (ret)
		return ret;

	(*i)++;
	return 0;
}
1316 static int translate_compat_table(const char *name
,
1317 unsigned int valid_hooks
,
1318 struct xt_table_info
**pinfo
,
1320 unsigned int total_size
,
1321 unsigned int number
,
1322 unsigned int *hook_entries
,
1323 unsigned int *underflows
)
1326 struct xt_table_info
*newinfo
, *info
;
1327 void *pos
, *entry0
, *entry1
;
1334 info
->number
= number
;
1336 /* Init all hooks to impossible value. */
1337 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1338 info
->hook_entry
[i
] = 0xFFFFFFFF;
1339 info
->underflow
[i
] = 0xFFFFFFFF;
1342 duprintf("translate_compat_table: size %u\n", info
->size
);
1344 xt_compat_lock(NFPROTO_ARP
);
1345 /* Walk through entries, checking offsets. */
1346 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1347 check_compat_entry_size_and_hooks
,
1348 info
, &size
, entry0
,
1349 entry0
+ total_size
,
1350 hook_entries
, underflows
, &j
, name
);
1356 duprintf("translate_compat_table: %u not %u entries\n",
1361 /* Check hooks all assigned */
1362 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1363 /* Only hooks which are valid */
1364 if (!(valid_hooks
& (1 << i
)))
1366 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1367 duprintf("Invalid hook entry %u %u\n",
1368 i
, hook_entries
[i
]);
1371 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1372 duprintf("Invalid underflow %u %u\n",
1379 newinfo
= xt_alloc_table_info(size
);
1383 newinfo
->number
= number
;
1384 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1385 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1386 newinfo
->underflow
[i
] = info
->underflow
[i
];
1388 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1391 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1392 compat_copy_entry_from_user
,
1393 &pos
, &size
, name
, newinfo
, entry1
);
1394 xt_compat_flush_offsets(NFPROTO_ARP
);
1395 xt_compat_unlock(NFPROTO_ARP
);
1400 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1404 ret
= ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, compat_check_entry
,
1408 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0
, newinfo
->size
, i
,
1409 compat_release_entry
, &j
);
1410 ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, cleanup_entry
, &i
);
1411 xt_free_table_info(newinfo
);
1415 /* And one copy for every other CPU */
1416 for_each_possible_cpu(i
)
1417 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1418 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1422 xt_free_table_info(info
);
1426 xt_free_table_info(newinfo
);
1428 COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
, compat_release_entry
, &j
);
1431 xt_compat_flush_offsets(NFPROTO_ARP
);
1432 xt_compat_unlock(NFPROTO_ARP
);
1436 struct compat_arpt_replace
{
1437 char name
[ARPT_TABLE_MAXNAMELEN
];
1441 u32 hook_entry
[NF_ARP_NUMHOOKS
];
1442 u32 underflow
[NF_ARP_NUMHOOKS
];
1444 compat_uptr_t counters
;
1445 struct compat_arpt_entry entries
[0];
/*
 * Compat handler for ARPT_SO_SET_REPLACE: copy a 32-bit-layout replacement
 * table from userspace, translate it to native layout, then hand it to the
 * common __do_replace().
 * NOTE(review): several lines (early error returns, the newinfo NULL
 * check, the trailing translate_compat_table() arguments and part of the
 * error unwinding) appear truncated by extraction -- compare upstream.
 */
1448 static int compat_do_replace(struct net
*net
, void __user
*user
,
1452 struct compat_arpt_replace tmp
;
1453 struct xt_table_info
*newinfo
;
1454 void *loc_cpu_entry
;
/* Copy the fixed-size header describing the new table. */
1456 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1459 /* overflow check */
1460 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1462 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1465 newinfo
= xt_alloc_table_info(tmp
.size
);
1469 /* choose the copy that is on our node/cpu */
1470 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
/* Pull in the variable-size ruleset that follows the header. */
1471 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
), tmp
.size
) != 0) {
/* Convert compat entries to native layout (may replace newinfo). */
1476 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1477 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1478 tmp
.num_entries
, tmp
.hook_entry
,
1483 duprintf("compat_do_replace: Translated table\n");
/* Swap the table in; old counters are copied out via tmp.counters. */
1485 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1486 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1488 goto free_newinfo_untrans
;
/* Error path: release per-rule target resources, then the table. */
1491 free_newinfo_untrans
:
1492 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1494 xt_free_table_info(newinfo
);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes the
 * ARPT_SO_SET_* commands to the compat-aware helpers (the final "1"
 * argument to do_add_counters() selects the compat copy-in path).
 * NOTE(review): the switch scaffolding (braces, break statements) and the
 * return statements appear lost in extraction.
 */
1498 static int compat_do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1503 if (!capable(CAP_NET_ADMIN
))
1507 case ARPT_SO_SET_REPLACE
:
1508 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1511 case ARPT_SO_SET_ADD_COUNTERS
:
1512 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1516 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
/*
 * ARPT_ENTRY_ITERATE callback: serialize one native arpt_entry into the
 * compat (32-bit) layout at *dstptr, fixing up target_offset/next_offset
 * for the size difference between the native and compat entry headers.
 * NOTE(review): the "origsize = *size" snapshot, the counter-index
 * increment and several error returns appear truncated by extraction.
 */
1523 static int compat_copy_entry_to_user(struct arpt_entry
*e
, void __user
**dstptr
,
1524 compat_uint_t
*size
,
1525 struct xt_counters
*counters
,
1528 struct arpt_entry_target
*t
;
1529 struct compat_arpt_entry __user
*ce
;
1530 u_int16_t target_offset
, next_offset
;
1531 compat_uint_t origsize
;
1536 ce
= (struct compat_arpt_entry __user
*)*dstptr
;
/* Copy the entry header, then overwrite counters with live values. */
1537 if (copy_to_user(ce
, e
, sizeof(struct arpt_entry
)))
1540 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
/* Advance by the (smaller) compat header size; track the shrink. */
1543 *dstptr
+= sizeof(struct compat_arpt_entry
);
1544 *size
-= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1546 target_offset
= e
->target_offset
- (origsize
- *size
);
1548 t
= arpt_get_target(e
);
/* Let the target translate its own data into compat layout. */
1549 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1553 next_offset
= e
->next_offset
- (origsize
- *size
);
/* Patch the adjusted offsets into the already-copied compat header. */
1554 if (put_user(target_offset
, &ce
->target_offset
))
1556 if (put_user(next_offset
, &ce
->next_offset
))
/*
 * Copy a whole table to a 32-bit userland buffer: snapshot the counters,
 * then walk this CPU's copy of the entries, converting each rule via
 * compat_copy_entry_to_user().
 * NOTE(review): the declarations of ret/pos/size/i and the trailing
 * vfree(counters)/return appear truncated by extraction.
 */
1565 static int compat_copy_entries_to_user(unsigned int total_size
,
1566 struct xt_table
*table
,
1567 void __user
*userptr
)
1569 struct xt_counters
*counters
;
1570 const struct xt_table_info
*private = table
->private;
1574 void *loc_cpu_entry
;
/* alloc_counters() folds the per-CPU counters into one array. */
1577 counters
= alloc_counters(table
);
1578 if (IS_ERR(counters
))
1579 return PTR_ERR(counters
);
1581 /* choose the copy on our node/cpu */
1582 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1585 ret
= ARPT_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1586 compat_copy_entry_to_user
,
1587 &pos
, &size
, counters
, &i
);
/*
 * Compat mirror of struct arpt_get_entries for ARPT_SO_GET_ENTRIES.
 * NOTE(review): the "compat_uint_t size" member (referenced as get.size
 * in compat_get_entries() below) appears dropped by extraction.
 */
1592 struct compat_arpt_get_entries
{
1593 char name
[ARPT_TABLE_MAXNAMELEN
];
/* Flexible trailing array filled with compat-layout entries. */
1595 struct compat_arpt_entry entrytable
[0];
/*
 * Compat handler for ARPT_SO_GET_ENTRIES: validate the user-supplied
 * length against the table's compat size, then stream the entries out in
 * 32-bit layout while holding the xt compat lock.
 * NOTE(review): the error returns, the "struct xt_table *t" declaration
 * and the success-path module_put/xt_table_unlock appear truncated by
 * extraction.
 */
1598 static int compat_get_entries(struct net
*net
,
1599 struct compat_arpt_get_entries __user
*uptr
,
1603 struct compat_arpt_get_entries get
;
1606 if (*len
< sizeof(get
)) {
1607 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1610 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
/* Exact-size check: fixed header plus the advertised entry blob. */
1612 if (*len
!= sizeof(struct compat_arpt_get_entries
) + get
.size
) {
1613 duprintf("compat_get_entries: %u != %zu\n",
1614 *len
, sizeof(get
) + get
.size
);
/* The compat offset bookkeeping is protected by this lock. */
1618 xt_compat_lock(NFPROTO_ARP
);
1619 t
= xt_find_table_lock(net
, NFPROTO_ARP
, get
.name
);
1620 if (t
&& !IS_ERR(t
)) {
1621 const struct xt_table_info
*private = t
->private;
1622 struct xt_table_info info
;
1624 duprintf("t->private->number = %u\n", private->number
);
/* Recompute the compat view of the table size and compare. */
1625 ret
= compat_table_info(private, &info
);
1626 if (!ret
&& get
.size
== info
.size
) {
1627 ret
= compat_copy_entries_to_user(private->size
,
1628 t
, uptr
->entrytable
);
1630 duprintf("compat_get_entries: I've got %u not %u!\n",
1631 private->size
, get
.size
);
1634 xt_compat_flush_offsets(NFPROTO_ARP
);
1638 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1640 xt_compat_unlock(NFPROTO_ARP
);
/* Forward declaration: the compat getter falls back to the native one. */
1644 static int do_arpt_get_ctl(struct sock
*, int, void __user
*, int *);
/*
 * Compat getsockopt dispatcher: GET_INFO with compat=1 and the compat
 * entries copier; any other command is layout-identical and is forwarded
 * to the native do_arpt_get_ctl().
 * NOTE(review): the switch scaffolding (braces, breaks) and the returns
 * appear lost in extraction.
 */
1646 static int compat_do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1651 if (!capable(CAP_NET_ADMIN
))
1655 case ARPT_SO_GET_INFO
:
1656 ret
= get_info(sock_net(sk
), user
, len
, 1);
1658 case ARPT_SO_GET_ENTRIES
:
1659 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1662 ret
= do_arpt_get_ctl(sk
, cmd
, user
, len
);
/*
 * Native setsockopt dispatcher; same shape as the compat variant above
 * but passes compat=0 to do_add_counters().
 * NOTE(review): the switch scaffolding (braces, breaks) and the returns
 * appear lost in extraction.
 */
1668 static int do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1672 if (!capable(CAP_NET_ADMIN
))
1676 case ARPT_SO_SET_REPLACE
:
1677 ret
= do_replace(sock_net(sk
), user
, len
);
1680 case ARPT_SO_SET_ADD_COUNTERS
:
1681 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1685 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
/*
 * Native getsockopt dispatcher: table info, entry dump, and the target
 * revision query (which may modprobe "arpt_<name>" when the target is not
 * yet loaded).
 * NOTE(review): the error returns inside the length/copy checks and the
 * switch/return scaffolding appear lost in extraction.
 */
1692 static int do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1696 if (!capable(CAP_NET_ADMIN
))
1700 case ARPT_SO_GET_INFO
:
1701 ret
= get_info(sock_net(sk
), user
, len
, 0);
1704 case ARPT_SO_GET_ENTRIES
:
1705 ret
= get_entries(sock_net(sk
), user
, len
);
1708 case ARPT_SO_GET_REVISION_TARGET
: {
1709 struct xt_get_revision rev
;
1711 if (*len
!= sizeof(rev
)) {
1715 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
/* Look up the revision; autoload the target module on a miss. */
1720 try_then_request_module(xt_find_revision(NFPROTO_ARP
, rev
.name
,
1721 rev
.revision
, 1, &ret
),
1722 "arpt_%s", rev
.name
);
1727 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd
);
/*
 * Register an arptables table in a netns: allocate the per-CPU entry
 * copies, translate/verify the bootstrap ruleset, then hand everything to
 * xt_register_table().  Returns the live xt_table or an ERR_PTR.
 * NOTE(review): the newinfo NULL check, the trailing translate_table()
 * arguments and the intermediate error labels appear lost in extraction.
 */
1734 struct xt_table
*arpt_register_table(struct net
*net
, struct xt_table
*table
,
1735 const struct arpt_replace
*repl
)
1738 struct xt_table_info
*newinfo
;
1739 struct xt_table_info bootstrap
1740 = { 0, 0, 0, { 0 }, { 0 }, { } };
1741 void *loc_cpu_entry
;
1742 struct xt_table
*new_table
;
1744 newinfo
= xt_alloc_table_info(repl
->size
);
1750 /* choose the copy on our node/cpu */
1751 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1752 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
/* Validate offsets/hooks and bind targets before going live. */
1754 ret
= translate_table(table
->name
, table
->valid_hooks
,
1755 newinfo
, loc_cpu_entry
, repl
->size
,
1760 duprintf("arpt_register_table: translate table gives %d\n", ret
);
1764 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1765 if (IS_ERR(new_table
)) {
1766 ret
= PTR_ERR(new_table
);
/* Error path: free the table info allocated above. */
1772 xt_free_table_info(newinfo
);
1774 return ERR_PTR(ret
);
1777 void arpt_unregister_table(struct xt_table
*table
)
1779 struct xt_table_info
*private;
1780 void *loc_cpu_entry
;
1781 struct module
*table_owner
= table
->me
;
1783 private = xt_unregister_table(table
);
1785 /* Decrease module usage counts and free resources */
1786 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1787 ARPT_ENTRY_ITERATE(loc_cpu_entry
, private->size
,
1788 cleanup_entry
, NULL
);
1789 if (private->number
> private->initial_entries
)
1790 module_put(table_owner
);
1791 xt_free_table_info(private);
1794 /* The built-in targets: standard (NULL) and error. */
1795 static struct xt_target arpt_standard_target __read_mostly
= {
1796 .name
= ARPT_STANDARD_TARGET
,
1797 .targetsize
= sizeof(int),
1798 .family
= NFPROTO_ARP
,
1799 #ifdef CONFIG_COMPAT
1800 .compatsize
= sizeof(compat_int_t
),
1801 .compat_from_user
= compat_standard_from_user
,
1802 .compat_to_user
= compat_standard_to_user
,
1806 static struct xt_target arpt_error_target __read_mostly
= {
1807 .name
= ARPT_ERROR_TARGET
,
1808 .target
= arpt_error
,
1809 .targetsize
= ARPT_FUNCTION_MAXNAMELEN
,
1810 .family
= NFPROTO_ARP
,
/*
 * Sockopt registration for arptables: one set range and one get range
 * (ARPT_BASE_CTL .. ARPT_SO_{SET,GET}_MAX), with compat hooks when
 * CONFIG_COMPAT is enabled.
 * NOTE(review): the first initializer (the protocol-family ".pf" member,
 * before .set_optmin), the matching #endif lines and the closing brace
 * appear lost in extraction -- restore from upstream.
 */
1813 static struct nf_sockopt_ops arpt_sockopts
= {
1815 .set_optmin
= ARPT_BASE_CTL
,
1816 .set_optmax
= ARPT_SO_SET_MAX
+1,
1817 .set
= do_arpt_set_ctl
,
1818 #ifdef CONFIG_COMPAT
1819 .compat_set
= compat_do_arpt_set_ctl
,
1821 .get_optmin
= ARPT_BASE_CTL
,
1822 .get_optmax
= ARPT_SO_GET_MAX
+1,
1823 .get
= do_arpt_get_ctl
,
1824 #ifdef CONFIG_COMPAT
1825 .compat_get
= compat_do_arpt_get_ctl
,
1827 .owner
= THIS_MODULE
,
1830 static int __net_init
arp_tables_net_init(struct net
*net
)
1832 return xt_proto_init(net
, NFPROTO_ARP
);
1835 static void __net_exit
arp_tables_net_exit(struct net
*net
)
1837 xt_proto_fini(net
, NFPROTO_ARP
);
1840 static struct pernet_operations arp_tables_net_ops
= {
1841 .init
= arp_tables_net_init
,
1842 .exit
= arp_tables_net_exit
,
/*
 * Module init: register the per-netns xtables state, the two built-in
 * targets (standard verdict and ERROR), then the sockopt interface.
 * NOTE(review): the "int ret" declaration, the error checks / goto labels
 * between steps and the return statements appear lost in extraction; the
 * unregister calls at the bottom are the error-unwind path, executed in
 * reverse order of registration.
 */
1845 static int __init
arp_tables_init(void)
1849 ret
= register_pernet_subsys(&arp_tables_net_ops
);
1853 /* Noone else will be downing sem now, so we won't sleep */
1854 ret
= xt_register_target(&arpt_standard_target
);
1857 ret
= xt_register_target(&arpt_error_target
);
1861 /* Register setsockopt */
1862 ret
= nf_register_sockopt(&arpt_sockopts
);
1866 printk(KERN_INFO
"arp_tables: (C) 2002 David S. Miller\n");
1870 xt_unregister_target(&arpt_error_target
);
1872 xt_unregister_target(&arpt_standard_target
);
1874 unregister_pernet_subsys(&arp_tables_net_ops
);
1879 static void __exit
arp_tables_fini(void)
1881 nf_unregister_sockopt(&arpt_sockopts
);
1882 xt_unregister_target(&arpt_error_target
);
1883 xt_unregister_target(&arpt_standard_target
);
1884 unregister_pernet_subsys(&arp_tables_net_ops
);
/* Public API for table modules (e.g. arptable_filter) and hook callers. */
1887 EXPORT_SYMBOL(arpt_register_table
);
1888 EXPORT_SYMBOL(arpt_unregister_table
);
1889 EXPORT_SYMBOL(arpt_do_table
);
/* Module entry/exit points. */
1891 module_init(arp_tables_init
);
1892 module_exit(arp_tables_fini
);