2 * Packet matching code for ARP packets.
4 * Based heavily, if not almost entirely, upon ip_tables.c framework.
6 * Some ARP specific bits are:
8 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
14 #include <linux/netdevice.h>
15 #include <linux/capability.h>
16 #include <linux/if_arp.h>
17 #include <linux/kmod.h>
18 #include <linux/vmalloc.h>
19 #include <linux/proc_fs.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/mutex.h>
23 #include <linux/err.h>
24 #include <net/compat.h>
26 #include <asm/uaccess.h>
28 #include <linux/netfilter/x_tables.h>
29 #include <linux/netfilter_arp/arp_tables.h>
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
33 MODULE_DESCRIPTION("arptables core");
35 /*#define DEBUG_ARP_TABLES*/
36 /*#define DEBUG_ARP_TABLES_USER*/
38 #ifdef DEBUG_ARP_TABLES
39 #define dprintf(format, args...) printk(format , ## args)
41 #define dprintf(format, args...)
44 #ifdef DEBUG_ARP_TABLES_USER
45 #define duprintf(format, args...) printk(format , ## args)
47 #define duprintf(format, args...)
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define ARP_NF_ASSERT(x) \
54 printk("ARP_NF_ASSERT: %s:%s:%u\n", \
55 __func__, __FILE__, __LINE__); \
58 #define ARP_NF_ASSERT(x)
61 static inline int arp_devaddr_compare(const struct arpt_devaddr_info
*ap
,
62 const char *hdr_addr
, int len
)
66 if (len
> ARPT_DEV_ADDR_LEN_MAX
)
67 len
= ARPT_DEV_ADDR_LEN_MAX
;
70 for (i
= 0; i
< len
; i
++)
71 ret
|= (hdr_addr
[i
] ^ ap
->addr
[i
]) & ap
->mask
[i
];
77 * Unfortunatly, _b and _mask are not aligned to an int (or long int)
78 * Some arches dont care, unrolling the loop is a win on them.
79 * For other arches, we only have a 16bit alignement.
81 static unsigned long ifname_compare(const char *_a
, const char *_b
, const char *_mask
)
83 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
84 unsigned long ret
= ifname_compare_aligned(_a
, _b
, _mask
);
86 unsigned long ret
= 0;
87 const u16
*a
= (const u16
*)_a
;
88 const u16
*b
= (const u16
*)_b
;
89 const u16
*mask
= (const u16
*)_mask
;
92 for (i
= 0; i
< IFNAMSIZ
/sizeof(u16
); i
++)
93 ret
|= (a
[i
] ^ b
[i
]) & mask
[i
];
98 /* Returns whether packet matches rule or not. */
99 static inline int arp_packet_match(const struct arphdr
*arphdr
,
100 struct net_device
*dev
,
103 const struct arpt_arp
*arpinfo
)
105 const char *arpptr
= (char *)(arphdr
+ 1);
106 const char *src_devaddr
, *tgt_devaddr
;
107 __be32 src_ipaddr
, tgt_ipaddr
;
110 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
112 if (FWINV((arphdr
->ar_op
& arpinfo
->arpop_mask
) != arpinfo
->arpop
,
114 dprintf("ARP operation field mismatch.\n");
115 dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
116 arphdr
->ar_op
, arpinfo
->arpop
, arpinfo
->arpop_mask
);
120 if (FWINV((arphdr
->ar_hrd
& arpinfo
->arhrd_mask
) != arpinfo
->arhrd
,
122 dprintf("ARP hardware address format mismatch.\n");
123 dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
124 arphdr
->ar_hrd
, arpinfo
->arhrd
, arpinfo
->arhrd_mask
);
128 if (FWINV((arphdr
->ar_pro
& arpinfo
->arpro_mask
) != arpinfo
->arpro
,
130 dprintf("ARP protocol address format mismatch.\n");
131 dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
132 arphdr
->ar_pro
, arpinfo
->arpro
, arpinfo
->arpro_mask
);
136 if (FWINV((arphdr
->ar_hln
& arpinfo
->arhln_mask
) != arpinfo
->arhln
,
138 dprintf("ARP hardware address length mismatch.\n");
139 dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
140 arphdr
->ar_hln
, arpinfo
->arhln
, arpinfo
->arhln_mask
);
144 src_devaddr
= arpptr
;
145 arpptr
+= dev
->addr_len
;
146 memcpy(&src_ipaddr
, arpptr
, sizeof(u32
));
147 arpptr
+= sizeof(u32
);
148 tgt_devaddr
= arpptr
;
149 arpptr
+= dev
->addr_len
;
150 memcpy(&tgt_ipaddr
, arpptr
, sizeof(u32
));
152 if (FWINV(arp_devaddr_compare(&arpinfo
->src_devaddr
, src_devaddr
, dev
->addr_len
),
153 ARPT_INV_SRCDEVADDR
) ||
154 FWINV(arp_devaddr_compare(&arpinfo
->tgt_devaddr
, tgt_devaddr
, dev
->addr_len
),
155 ARPT_INV_TGTDEVADDR
)) {
156 dprintf("Source or target device address mismatch.\n");
161 if (FWINV((src_ipaddr
& arpinfo
->smsk
.s_addr
) != arpinfo
->src
.s_addr
,
163 FWINV(((tgt_ipaddr
& arpinfo
->tmsk
.s_addr
) != arpinfo
->tgt
.s_addr
),
165 dprintf("Source or target IP address mismatch.\n");
167 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
169 &arpinfo
->smsk
.s_addr
,
170 &arpinfo
->src
.s_addr
,
171 arpinfo
->invflags
& ARPT_INV_SRCIP
? " (INV)" : "");
172 dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
174 &arpinfo
->tmsk
.s_addr
,
175 &arpinfo
->tgt
.s_addr
,
176 arpinfo
->invflags
& ARPT_INV_TGTIP
? " (INV)" : "");
180 /* Look for ifname matches. */
181 ret
= ifname_compare(indev
, arpinfo
->iniface
, arpinfo
->iniface_mask
);
183 if (FWINV(ret
!= 0, ARPT_INV_VIA_IN
)) {
184 dprintf("VIA in mismatch (%s vs %s).%s\n",
185 indev
, arpinfo
->iniface
,
186 arpinfo
->invflags
&ARPT_INV_VIA_IN
?" (INV)":"");
190 ret
= ifname_compare(outdev
, arpinfo
->outiface
, arpinfo
->outiface_mask
);
192 if (FWINV(ret
!= 0, ARPT_INV_VIA_OUT
)) {
193 dprintf("VIA out mismatch (%s vs %s).%s\n",
194 outdev
, arpinfo
->outiface
,
195 arpinfo
->invflags
&ARPT_INV_VIA_OUT
?" (INV)":"");
203 static inline int arp_checkentry(const struct arpt_arp
*arp
)
205 if (arp
->flags
& ~ARPT_F_MASK
) {
206 duprintf("Unknown flag bits set: %08X\n",
207 arp
->flags
& ~ARPT_F_MASK
);
210 if (arp
->invflags
& ~ARPT_INV_MASK
) {
211 duprintf("Unknown invflag bits set: %08X\n",
212 arp
->invflags
& ~ARPT_INV_MASK
);
220 arpt_error(struct sk_buff
*skb
, const struct xt_target_param
*par
)
223 printk("arp_tables: error: '%s'\n",
224 (const char *)par
->targinfo
);
/* Resolve a byte offset within the rule blob to an entry pointer. */
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)((char *)base + offset);
}
235 struct arpt_entry
*arpt_next_entry(const struct arpt_entry
*entry
)
237 return (void *)entry
+ entry
->next_offset
;
240 unsigned int arpt_do_table(struct sk_buff
*skb
,
242 const struct net_device
*in
,
243 const struct net_device
*out
,
244 struct xt_table
*table
)
246 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
247 unsigned int verdict
= NF_DROP
;
248 const struct arphdr
*arp
;
249 bool hotdrop
= false;
250 struct arpt_entry
*e
, *back
;
251 const char *indev
, *outdev
;
253 const struct xt_table_info
*private;
254 struct xt_target_param tgpar
;
256 if (!pskb_may_pull(skb
, arp_hdr_len(skb
->dev
)))
259 indev
= in
? in
->name
: nulldevname
;
260 outdev
= out
? out
->name
: nulldevname
;
263 private = table
->private;
264 table_base
= private->entries
[smp_processor_id()];
266 e
= get_entry(table_base
, private->hook_entry
[hook
]);
267 back
= get_entry(table_base
, private->underflow
[hook
]);
271 tgpar
.hooknum
= hook
;
272 tgpar
.family
= NFPROTO_ARP
;
276 struct arpt_entry_target
*t
;
279 if (!arp_packet_match(arp
, skb
->dev
, indev
, outdev
, &e
->arp
)) {
280 e
= arpt_next_entry(e
);
284 hdr_len
= sizeof(*arp
) + (2 * sizeof(struct in_addr
)) +
285 (2 * skb
->dev
->addr_len
);
286 ADD_COUNTER(e
->counters
, hdr_len
, 1);
288 t
= arpt_get_target(e
);
290 /* Standard target? */
291 if (!t
->u
.kernel
.target
->target
) {
294 v
= ((struct arpt_standard_target
*)t
)->verdict
;
296 /* Pop from stack? */
297 if (v
!= ARPT_RETURN
) {
298 verdict
= (unsigned)(-v
) - 1;
302 back
= get_entry(table_base
, back
->comefrom
);
306 != arpt_next_entry(e
)) {
307 /* Save old back ptr in next entry */
308 struct arpt_entry
*next
= arpt_next_entry(e
);
309 next
->comefrom
= (void *)back
- table_base
;
311 /* set back pointer to next entry */
315 e
= get_entry(table_base
, v
);
319 /* Targets which reenter must return
322 tgpar
.target
= t
->u
.kernel
.target
;
323 tgpar
.targinfo
= t
->data
;
324 verdict
= t
->u
.kernel
.target
->target(skb
, &tgpar
);
326 /* Target might have changed stuff. */
329 if (verdict
== ARPT_CONTINUE
)
330 e
= arpt_next_entry(e
);
335 xt_info_rdunlock_bh();
343 /* All zeroes == unconditional rule. */
344 static inline bool unconditional(const struct arpt_arp
*arp
)
346 static const struct arpt_arp uncond
;
348 return memcmp(arp
, &uncond
, sizeof(uncond
)) == 0;
351 /* Figures out from what hook each rule can be called: returns 0 if
352 * there are loops. Puts hook bitmask in comefrom.
354 static int mark_source_chains(struct xt_table_info
*newinfo
,
355 unsigned int valid_hooks
, void *entry0
)
359 /* No recursion; use packet counter to save back ptrs (reset
360 * to 0 as we leave), and comefrom to save source hook bitmask.
362 for (hook
= 0; hook
< NF_ARP_NUMHOOKS
; hook
++) {
363 unsigned int pos
= newinfo
->hook_entry
[hook
];
365 = (struct arpt_entry
*)(entry0
+ pos
);
367 if (!(valid_hooks
& (1 << hook
)))
370 /* Set initial back pointer. */
371 e
->counters
.pcnt
= pos
;
374 const struct arpt_standard_target
*t
375 = (void *)arpt_get_target(e
);
376 int visited
= e
->comefrom
& (1 << hook
);
378 if (e
->comefrom
& (1 << NF_ARP_NUMHOOKS
)) {
379 printk("arptables: loop hook %u pos %u %08X.\n",
380 hook
, pos
, e
->comefrom
);
384 |= ((1 << hook
) | (1 << NF_ARP_NUMHOOKS
));
386 /* Unconditional return/END. */
387 if ((e
->target_offset
== sizeof(struct arpt_entry
)
388 && (strcmp(t
->target
.u
.user
.name
,
389 ARPT_STANDARD_TARGET
) == 0)
391 && unconditional(&e
->arp
)) || visited
) {
392 unsigned int oldpos
, size
;
394 if ((strcmp(t
->target
.u
.user
.name
,
395 ARPT_STANDARD_TARGET
) == 0) &&
396 t
->verdict
< -NF_MAX_VERDICT
- 1) {
397 duprintf("mark_source_chains: bad "
398 "negative verdict (%i)\n",
403 /* Return: backtrack through the last
407 e
->comefrom
^= (1<<NF_ARP_NUMHOOKS
);
409 pos
= e
->counters
.pcnt
;
410 e
->counters
.pcnt
= 0;
412 /* We're at the start. */
416 e
= (struct arpt_entry
*)
418 } while (oldpos
== pos
+ e
->next_offset
);
421 size
= e
->next_offset
;
422 e
= (struct arpt_entry
*)
423 (entry0
+ pos
+ size
);
424 e
->counters
.pcnt
= pos
;
427 int newpos
= t
->verdict
;
429 if (strcmp(t
->target
.u
.user
.name
,
430 ARPT_STANDARD_TARGET
) == 0
432 if (newpos
> newinfo
->size
-
433 sizeof(struct arpt_entry
)) {
434 duprintf("mark_source_chains: "
435 "bad verdict (%i)\n",
440 /* This a jump; chase it. */
441 duprintf("Jump rule %u -> %u\n",
444 /* ... this is a fallthru */
445 newpos
= pos
+ e
->next_offset
;
447 e
= (struct arpt_entry
*)
449 e
->counters
.pcnt
= pos
;
454 duprintf("Finished chain %u\n", hook
);
459 static inline int check_entry(struct arpt_entry
*e
, const char *name
)
461 const struct arpt_entry_target
*t
;
463 if (!arp_checkentry(&e
->arp
)) {
464 duprintf("arp_tables: arp check failed %p %s.\n", e
, name
);
468 if (e
->target_offset
+ sizeof(struct arpt_entry_target
) > e
->next_offset
)
471 t
= arpt_get_target(e
);
472 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
478 static inline int check_target(struct arpt_entry
*e
, const char *name
)
480 struct arpt_entry_target
*t
= arpt_get_target(e
);
482 struct xt_tgchk_param par
= {
485 .target
= t
->u
.kernel
.target
,
487 .hook_mask
= e
->comefrom
,
488 .family
= NFPROTO_ARP
,
491 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
), 0, false);
493 duprintf("arp_tables: check failed for `%s'.\n",
494 t
->u
.kernel
.target
->name
);
501 find_check_entry(struct arpt_entry
*e
, const char *name
, unsigned int size
,
504 struct arpt_entry_target
*t
;
505 struct xt_target
*target
;
508 ret
= check_entry(e
, name
);
512 t
= arpt_get_target(e
);
513 target
= try_then_request_module(xt_find_target(NFPROTO_ARP
,
516 "arpt_%s", t
->u
.user
.name
);
517 if (IS_ERR(target
) || !target
) {
518 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
519 ret
= target
? PTR_ERR(target
) : -ENOENT
;
522 t
->u
.kernel
.target
= target
;
524 ret
= check_target(e
, name
);
531 module_put(t
->u
.kernel
.target
->me
);
536 static bool check_underflow(struct arpt_entry
*e
)
538 const struct arpt_entry_target
*t
;
539 unsigned int verdict
;
541 if (!unconditional(&e
->arp
))
543 t
= arpt_get_target(e
);
544 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
546 verdict
= ((struct arpt_standard_target
*)t
)->verdict
;
547 verdict
= -verdict
- 1;
548 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
551 static inline int check_entry_size_and_hooks(struct arpt_entry
*e
,
552 struct xt_table_info
*newinfo
,
554 unsigned char *limit
,
555 const unsigned int *hook_entries
,
556 const unsigned int *underflows
,
557 unsigned int valid_hooks
,
562 if ((unsigned long)e
% __alignof__(struct arpt_entry
) != 0
563 || (unsigned char *)e
+ sizeof(struct arpt_entry
) >= limit
) {
564 duprintf("Bad offset %p\n", e
);
569 < sizeof(struct arpt_entry
) + sizeof(struct arpt_entry_target
)) {
570 duprintf("checking: element %p size %u\n",
575 /* Check hooks & underflows */
576 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
577 if (!(valid_hooks
& (1 << h
)))
579 if ((unsigned char *)e
- base
== hook_entries
[h
])
580 newinfo
->hook_entry
[h
] = hook_entries
[h
];
581 if ((unsigned char *)e
- base
== underflows
[h
]) {
582 if (!check_underflow(e
)) {
583 pr_err("Underflows must be unconditional and "
584 "use the STANDARD target with "
588 newinfo
->underflow
[h
] = underflows
[h
];
592 /* Clear counters and comefrom */
593 e
->counters
= ((struct xt_counters
) { 0, 0 });
600 static inline int cleanup_entry(struct arpt_entry
*e
, unsigned int *i
)
602 struct xt_tgdtor_param par
;
603 struct arpt_entry_target
*t
;
605 if (i
&& (*i
)-- == 0)
608 t
= arpt_get_target(e
);
609 par
.target
= t
->u
.kernel
.target
;
610 par
.targinfo
= t
->data
;
611 par
.family
= NFPROTO_ARP
;
612 if (par
.target
->destroy
!= NULL
)
613 par
.target
->destroy(&par
);
614 module_put(par
.target
->me
);
618 /* Checks and translates the user-supplied table segment (held in
621 static int translate_table(const char *name
,
622 unsigned int valid_hooks
,
623 struct xt_table_info
*newinfo
,
627 const unsigned int *hook_entries
,
628 const unsigned int *underflows
)
633 newinfo
->size
= size
;
634 newinfo
->number
= number
;
636 /* Init all hooks to impossible value. */
637 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
638 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
639 newinfo
->underflow
[i
] = 0xFFFFFFFF;
642 duprintf("translate_table: size %u\n", newinfo
->size
);
645 /* Walk through entries, checking offsets. */
646 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
647 check_entry_size_and_hooks
,
651 hook_entries
, underflows
, valid_hooks
, &i
);
652 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret
);
657 duprintf("translate_table: %u not %u entries\n",
662 /* Check hooks all assigned */
663 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
664 /* Only hooks which are valid */
665 if (!(valid_hooks
& (1 << i
)))
667 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
668 duprintf("Invalid hook entry %u %u\n",
672 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
673 duprintf("Invalid underflow %u %u\n",
679 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
)) {
680 duprintf("Looping hook\n");
684 /* Finally, each sanity check must pass */
686 ret
= ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
687 find_check_entry
, name
, size
, &i
);
690 ARPT_ENTRY_ITERATE(entry0
, newinfo
->size
,
695 /* And one copy for every other CPU */
696 for_each_possible_cpu(i
) {
697 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
698 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
705 static inline int add_entry_to_counter(const struct arpt_entry
*e
,
706 struct xt_counters total
[],
709 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
715 static inline int set_entry_to_counter(const struct arpt_entry
*e
,
716 struct xt_counters total
[],
719 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
725 static void get_counters(const struct xt_table_info
*t
,
726 struct xt_counters counters
[])
732 /* Instead of clearing (by a previous call to memset())
733 * the counters and using adds, we set the counters
734 * with data used by 'current' CPU
736 * Bottom half has to be disabled to prevent deadlock
737 * if new softirq were to run and call ipt_do_table
740 curcpu
= smp_processor_id();
743 ARPT_ENTRY_ITERATE(t
->entries
[curcpu
],
745 set_entry_to_counter
,
749 for_each_possible_cpu(cpu
) {
754 ARPT_ENTRY_ITERATE(t
->entries
[cpu
],
756 add_entry_to_counter
,
759 xt_info_wrunlock(cpu
);
764 static struct xt_counters
*alloc_counters(struct xt_table
*table
)
766 unsigned int countersize
;
767 struct xt_counters
*counters
;
768 struct xt_table_info
*private = table
->private;
770 /* We need atomic snapshot of counters: rest doesn't change
771 * (other than comefrom, which userspace doesn't care
774 countersize
= sizeof(struct xt_counters
) * private->number
;
775 counters
= vmalloc_node(countersize
, numa_node_id());
777 if (counters
== NULL
)
778 return ERR_PTR(-ENOMEM
);
780 get_counters(private, counters
);
785 static int copy_entries_to_user(unsigned int total_size
,
786 struct xt_table
*table
,
787 void __user
*userptr
)
789 unsigned int off
, num
;
790 struct arpt_entry
*e
;
791 struct xt_counters
*counters
;
792 struct xt_table_info
*private = table
->private;
796 counters
= alloc_counters(table
);
797 if (IS_ERR(counters
))
798 return PTR_ERR(counters
);
800 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
801 /* ... then copy entire thing ... */
802 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
807 /* FIXME: use iterator macros --RR */
808 /* ... then go back and fix counters and names */
809 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
810 struct arpt_entry_target
*t
;
812 e
= (struct arpt_entry
*)(loc_cpu_entry
+ off
);
813 if (copy_to_user(userptr
+ off
814 + offsetof(struct arpt_entry
, counters
),
816 sizeof(counters
[num
])) != 0) {
821 t
= arpt_get_target(e
);
822 if (copy_to_user(userptr
+ off
+ e
->target_offset
823 + offsetof(struct arpt_entry_target
,
825 t
->u
.kernel
.target
->name
,
826 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
838 static void compat_standard_from_user(void *dst
, void *src
)
840 int v
= *(compat_int_t
*)src
;
843 v
+= xt_compat_calc_jump(NFPROTO_ARP
, v
);
844 memcpy(dst
, &v
, sizeof(v
));
847 static int compat_standard_to_user(void __user
*dst
, void *src
)
849 compat_int_t cv
= *(int *)src
;
852 cv
-= xt_compat_calc_jump(NFPROTO_ARP
, cv
);
853 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
856 static int compat_calc_entry(struct arpt_entry
*e
,
857 const struct xt_table_info
*info
,
858 void *base
, struct xt_table_info
*newinfo
)
860 struct arpt_entry_target
*t
;
861 unsigned int entry_offset
;
864 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
865 entry_offset
= (void *)e
- base
;
867 t
= arpt_get_target(e
);
868 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
869 newinfo
->size
-= off
;
870 ret
= xt_compat_add_offset(NFPROTO_ARP
, entry_offset
, off
);
874 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
875 if (info
->hook_entry
[i
] &&
876 (e
< (struct arpt_entry
*)(base
+ info
->hook_entry
[i
])))
877 newinfo
->hook_entry
[i
] -= off
;
878 if (info
->underflow
[i
] &&
879 (e
< (struct arpt_entry
*)(base
+ info
->underflow
[i
])))
880 newinfo
->underflow
[i
] -= off
;
885 static int compat_table_info(const struct xt_table_info
*info
,
886 struct xt_table_info
*newinfo
)
890 if (!newinfo
|| !info
)
893 /* we dont care about newinfo->entries[] */
894 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
895 newinfo
->initial_entries
= 0;
896 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
897 return ARPT_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
898 compat_calc_entry
, info
, loc_cpu_entry
,
903 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
905 char name
[ARPT_TABLE_MAXNAMELEN
];
909 if (*len
!= sizeof(struct arpt_getinfo
)) {
910 duprintf("length %u != %Zu\n", *len
,
911 sizeof(struct arpt_getinfo
));
915 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
918 name
[ARPT_TABLE_MAXNAMELEN
-1] = '\0';
921 xt_compat_lock(NFPROTO_ARP
);
923 t
= try_then_request_module(xt_find_table_lock(net
, NFPROTO_ARP
, name
),
924 "arptable_%s", name
);
925 if (t
&& !IS_ERR(t
)) {
926 struct arpt_getinfo info
;
927 const struct xt_table_info
*private = t
->private;
931 struct xt_table_info tmp
;
932 ret
= compat_table_info(private, &tmp
);
933 xt_compat_flush_offsets(NFPROTO_ARP
);
937 info
.valid_hooks
= t
->valid_hooks
;
938 memcpy(info
.hook_entry
, private->hook_entry
,
939 sizeof(info
.hook_entry
));
940 memcpy(info
.underflow
, private->underflow
,
941 sizeof(info
.underflow
));
942 info
.num_entries
= private->number
;
943 info
.size
= private->size
;
944 strcpy(info
.name
, name
);
946 if (copy_to_user(user
, &info
, *len
) != 0)
953 ret
= t
? PTR_ERR(t
) : -ENOENT
;
956 xt_compat_unlock(NFPROTO_ARP
);
961 static int get_entries(struct net
*net
, struct arpt_get_entries __user
*uptr
,
965 struct arpt_get_entries get
;
968 if (*len
< sizeof(get
)) {
969 duprintf("get_entries: %u < %Zu\n", *len
, sizeof(get
));
972 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
974 if (*len
!= sizeof(struct arpt_get_entries
) + get
.size
) {
975 duprintf("get_entries: %u != %Zu\n", *len
,
976 sizeof(struct arpt_get_entries
) + get
.size
);
980 t
= xt_find_table_lock(net
, NFPROTO_ARP
, get
.name
);
981 if (t
&& !IS_ERR(t
)) {
982 const struct xt_table_info
*private = t
->private;
984 duprintf("t->private->number = %u\n",
986 if (get
.size
== private->size
)
987 ret
= copy_entries_to_user(private->size
,
988 t
, uptr
->entrytable
);
990 duprintf("get_entries: I've got %u not %u!\n",
991 private->size
, get
.size
);
997 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1002 static int __do_replace(struct net
*net
, const char *name
,
1003 unsigned int valid_hooks
,
1004 struct xt_table_info
*newinfo
,
1005 unsigned int num_counters
,
1006 void __user
*counters_ptr
)
1010 struct xt_table_info
*oldinfo
;
1011 struct xt_counters
*counters
;
1012 void *loc_cpu_old_entry
;
1015 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
1022 t
= try_then_request_module(xt_find_table_lock(net
, NFPROTO_ARP
, name
),
1023 "arptable_%s", name
);
1024 if (!t
|| IS_ERR(t
)) {
1025 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1026 goto free_newinfo_counters_untrans
;
1030 if (valid_hooks
!= t
->valid_hooks
) {
1031 duprintf("Valid hook crap: %08X vs %08X\n",
1032 valid_hooks
, t
->valid_hooks
);
1037 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1041 /* Update module usage count based on number of rules */
1042 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1043 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1044 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1045 (newinfo
->number
<= oldinfo
->initial_entries
))
1047 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1048 (newinfo
->number
<= oldinfo
->initial_entries
))
1051 /* Get the old counters, and synchronize with replace */
1052 get_counters(oldinfo
, counters
);
1054 /* Decrease module usage counts and free resource */
1055 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1056 ARPT_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1059 xt_free_table_info(oldinfo
);
1060 if (copy_to_user(counters_ptr
, counters
,
1061 sizeof(struct xt_counters
) * num_counters
) != 0)
1070 free_newinfo_counters_untrans
:
1076 static int do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1079 struct arpt_replace tmp
;
1080 struct xt_table_info
*newinfo
;
1081 void *loc_cpu_entry
;
1083 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1086 /* overflow check */
1087 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1090 newinfo
= xt_alloc_table_info(tmp
.size
);
1094 /* choose the copy that is on our node/cpu */
1095 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1096 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1102 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1103 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1104 tmp
.hook_entry
, tmp
.underflow
);
1108 duprintf("arp_tables: Translated table\n");
1110 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1111 tmp
.num_counters
, tmp
.counters
);
1113 goto free_newinfo_untrans
;
1116 free_newinfo_untrans
:
1117 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1119 xt_free_table_info(newinfo
);
1123 /* We're lazy, and add to the first CPU; overflow works its fey magic
1124 * and everything is OK. */
1126 add_counter_to_entry(struct arpt_entry
*e
,
1127 const struct xt_counters addme
[],
1130 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1136 static int do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
,
1139 unsigned int i
, curcpu
;
1140 struct xt_counters_info tmp
;
1141 struct xt_counters
*paddc
;
1142 unsigned int num_counters
;
1147 const struct xt_table_info
*private;
1149 void *loc_cpu_entry
;
1150 #ifdef CONFIG_COMPAT
1151 struct compat_xt_counters_info compat_tmp
;
1155 size
= sizeof(struct compat_xt_counters_info
);
1160 size
= sizeof(struct xt_counters_info
);
1163 if (copy_from_user(ptmp
, user
, size
) != 0)
1166 #ifdef CONFIG_COMPAT
1168 num_counters
= compat_tmp
.num_counters
;
1169 name
= compat_tmp
.name
;
1173 num_counters
= tmp
.num_counters
;
1177 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1180 paddc
= vmalloc_node(len
- size
, numa_node_id());
1184 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1189 t
= xt_find_table_lock(net
, NFPROTO_ARP
, name
);
1190 if (!t
|| IS_ERR(t
)) {
1191 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1196 private = t
->private;
1197 if (private->number
!= num_counters
) {
1199 goto unlock_up_free
;
1203 /* Choose the copy that is on our node */
1204 curcpu
= smp_processor_id();
1205 loc_cpu_entry
= private->entries
[curcpu
];
1206 xt_info_wrlock(curcpu
);
1207 ARPT_ENTRY_ITERATE(loc_cpu_entry
,
1209 add_counter_to_entry
,
1212 xt_info_wrunlock(curcpu
);
1223 #ifdef CONFIG_COMPAT
1225 compat_release_entry(struct compat_arpt_entry
*e
, unsigned int *i
)
1227 struct arpt_entry_target
*t
;
1229 if (i
&& (*i
)-- == 0)
1232 t
= compat_arpt_get_target(e
);
1233 module_put(t
->u
.kernel
.target
->me
);
1238 check_compat_entry_size_and_hooks(struct compat_arpt_entry
*e
,
1239 struct xt_table_info
*newinfo
,
1241 unsigned char *base
,
1242 unsigned char *limit
,
1243 unsigned int *hook_entries
,
1244 unsigned int *underflows
,
1248 struct arpt_entry_target
*t
;
1249 struct xt_target
*target
;
1250 unsigned int entry_offset
;
1253 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1254 if ((unsigned long)e
% __alignof__(struct compat_arpt_entry
) != 0
1255 || (unsigned char *)e
+ sizeof(struct compat_arpt_entry
) >= limit
) {
1256 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1260 if (e
->next_offset
< sizeof(struct compat_arpt_entry
) +
1261 sizeof(struct compat_xt_entry_target
)) {
1262 duprintf("checking: element %p size %u\n",
1267 /* For purposes of check_entry casting the compat entry is fine */
1268 ret
= check_entry((struct arpt_entry
*)e
, name
);
1272 off
= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1273 entry_offset
= (void *)e
- (void *)base
;
1275 t
= compat_arpt_get_target(e
);
1276 target
= try_then_request_module(xt_find_target(NFPROTO_ARP
,
1278 t
->u
.user
.revision
),
1279 "arpt_%s", t
->u
.user
.name
);
1280 if (IS_ERR(target
) || !target
) {
1281 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1283 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1286 t
->u
.kernel
.target
= target
;
1288 off
+= xt_compat_target_offset(target
);
1290 ret
= xt_compat_add_offset(NFPROTO_ARP
, entry_offset
, off
);
1292 goto release_target
;
1294 /* Check hooks & underflows */
1295 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1296 if ((unsigned char *)e
- base
== hook_entries
[h
])
1297 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1298 if ((unsigned char *)e
- base
== underflows
[h
])
1299 newinfo
->underflow
[h
] = underflows
[h
];
1302 /* Clear counters and comefrom */
1303 memset(&e
->counters
, 0, sizeof(e
->counters
));
1310 module_put(t
->u
.kernel
.target
->me
);
1316 compat_copy_entry_from_user(struct compat_arpt_entry
*e
, void **dstptr
,
1317 unsigned int *size
, const char *name
,
1318 struct xt_table_info
*newinfo
, unsigned char *base
)
1320 struct arpt_entry_target
*t
;
1321 struct xt_target
*target
;
1322 struct arpt_entry
*de
;
1323 unsigned int origsize
;
1328 de
= (struct arpt_entry
*)*dstptr
;
1329 memcpy(de
, e
, sizeof(struct arpt_entry
));
1330 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1332 *dstptr
+= sizeof(struct arpt_entry
);
1333 *size
+= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1335 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1336 t
= compat_arpt_get_target(e
);
1337 target
= t
->u
.kernel
.target
;
1338 xt_compat_target_from_user(t
, dstptr
, size
);
1340 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1341 for (h
= 0; h
< NF_ARP_NUMHOOKS
; h
++) {
1342 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1343 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1344 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1345 newinfo
->underflow
[h
] -= origsize
- *size
;
/* Final per-entry validation after compat translation: run the
 * target's check hook; bump the processed count on success.
 */
static inline int compat_check_entry(struct arpt_entry *e, const char *name,
				     unsigned int *i)
{
	int ret;

	ret = check_target(e, name);
	if (ret)
		return ret;

	(*i)++;
	return 0;
}
1363 static int translate_compat_table(const char *name
,
1364 unsigned int valid_hooks
,
1365 struct xt_table_info
**pinfo
,
1367 unsigned int total_size
,
1368 unsigned int number
,
1369 unsigned int *hook_entries
,
1370 unsigned int *underflows
)
1373 struct xt_table_info
*newinfo
, *info
;
1374 void *pos
, *entry0
, *entry1
;
1381 info
->number
= number
;
1383 /* Init all hooks to impossible value. */
1384 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1385 info
->hook_entry
[i
] = 0xFFFFFFFF;
1386 info
->underflow
[i
] = 0xFFFFFFFF;
1389 duprintf("translate_compat_table: size %u\n", info
->size
);
1391 xt_compat_lock(NFPROTO_ARP
);
1392 /* Walk through entries, checking offsets. */
1393 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1394 check_compat_entry_size_and_hooks
,
1395 info
, &size
, entry0
,
1396 entry0
+ total_size
,
1397 hook_entries
, underflows
, &j
, name
);
1403 duprintf("translate_compat_table: %u not %u entries\n",
1408 /* Check hooks all assigned */
1409 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1410 /* Only hooks which are valid */
1411 if (!(valid_hooks
& (1 << i
)))
1413 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1414 duprintf("Invalid hook entry %u %u\n",
1415 i
, hook_entries
[i
]);
1418 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1419 duprintf("Invalid underflow %u %u\n",
1426 newinfo
= xt_alloc_table_info(size
);
1430 newinfo
->number
= number
;
1431 for (i
= 0; i
< NF_ARP_NUMHOOKS
; i
++) {
1432 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1433 newinfo
->underflow
[i
] = info
->underflow
[i
];
1435 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1438 ret
= COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
,
1439 compat_copy_entry_from_user
,
1440 &pos
, &size
, name
, newinfo
, entry1
);
1441 xt_compat_flush_offsets(NFPROTO_ARP
);
1442 xt_compat_unlock(NFPROTO_ARP
);
1447 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1451 ret
= ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, compat_check_entry
,
1455 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0
, newinfo
->size
, i
,
1456 compat_release_entry
, &j
);
1457 ARPT_ENTRY_ITERATE(entry1
, newinfo
->size
, cleanup_entry
, &i
);
1458 xt_free_table_info(newinfo
);
1462 /* And one copy for every other CPU */
1463 for_each_possible_cpu(i
)
1464 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1465 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1469 xt_free_table_info(info
);
1473 xt_free_table_info(newinfo
);
1475 COMPAT_ARPT_ENTRY_ITERATE(entry0
, total_size
, compat_release_entry
, &j
);
1478 xt_compat_flush_offsets(NFPROTO_ARP
);
1479 xt_compat_unlock(NFPROTO_ARP
);
1483 struct compat_arpt_replace
{
1484 char name
[ARPT_TABLE_MAXNAMELEN
];
1488 u32 hook_entry
[NF_ARP_NUMHOOKS
];
1489 u32 underflow
[NF_ARP_NUMHOOKS
];
1491 compat_uptr_t counters
;
1492 struct compat_arpt_entry entries
[0];
1495 static int compat_do_replace(struct net
*net
, void __user
*user
,
1499 struct compat_arpt_replace tmp
;
1500 struct xt_table_info
*newinfo
;
1501 void *loc_cpu_entry
;
1503 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1506 /* overflow check */
1507 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1509 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1512 newinfo
= xt_alloc_table_info(tmp
.size
);
1516 /* choose the copy that is on our node/cpu */
1517 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1518 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
), tmp
.size
) != 0) {
1523 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1524 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1525 tmp
.num_entries
, tmp
.hook_entry
,
1530 duprintf("compat_do_replace: Translated table\n");
1532 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1533 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1535 goto free_newinfo_untrans
;
1538 free_newinfo_untrans
:
1539 ARPT_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1541 xt_free_table_info(newinfo
);
1545 static int compat_do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1550 if (!capable(CAP_NET_ADMIN
))
1554 case ARPT_SO_SET_REPLACE
:
1555 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1558 case ARPT_SO_SET_ADD_COUNTERS
:
1559 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1563 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
1570 static int compat_copy_entry_to_user(struct arpt_entry
*e
, void __user
**dstptr
,
1571 compat_uint_t
*size
,
1572 struct xt_counters
*counters
,
1575 struct arpt_entry_target
*t
;
1576 struct compat_arpt_entry __user
*ce
;
1577 u_int16_t target_offset
, next_offset
;
1578 compat_uint_t origsize
;
1583 ce
= (struct compat_arpt_entry __user
*)*dstptr
;
1584 if (copy_to_user(ce
, e
, sizeof(struct arpt_entry
)))
1587 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
1590 *dstptr
+= sizeof(struct compat_arpt_entry
);
1591 *size
-= sizeof(struct arpt_entry
) - sizeof(struct compat_arpt_entry
);
1593 target_offset
= e
->target_offset
- (origsize
- *size
);
1595 t
= arpt_get_target(e
);
1596 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1600 next_offset
= e
->next_offset
- (origsize
- *size
);
1601 if (put_user(target_offset
, &ce
->target_offset
))
1603 if (put_user(next_offset
, &ce
->next_offset
))
1612 static int compat_copy_entries_to_user(unsigned int total_size
,
1613 struct xt_table
*table
,
1614 void __user
*userptr
)
1616 struct xt_counters
*counters
;
1617 const struct xt_table_info
*private = table
->private;
1621 void *loc_cpu_entry
;
1624 counters
= alloc_counters(table
);
1625 if (IS_ERR(counters
))
1626 return PTR_ERR(counters
);
1628 /* choose the copy on our node/cpu */
1629 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1632 ret
= ARPT_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1633 compat_copy_entry_to_user
,
1634 &pos
, &size
, counters
, &i
);
1639 struct compat_arpt_get_entries
{
1640 char name
[ARPT_TABLE_MAXNAMELEN
];
1642 struct compat_arpt_entry entrytable
[0];
1645 static int compat_get_entries(struct net
*net
,
1646 struct compat_arpt_get_entries __user
*uptr
,
1650 struct compat_arpt_get_entries get
;
1653 if (*len
< sizeof(get
)) {
1654 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1657 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1659 if (*len
!= sizeof(struct compat_arpt_get_entries
) + get
.size
) {
1660 duprintf("compat_get_entries: %u != %zu\n",
1661 *len
, sizeof(get
) + get
.size
);
1665 xt_compat_lock(NFPROTO_ARP
);
1666 t
= xt_find_table_lock(net
, NFPROTO_ARP
, get
.name
);
1667 if (t
&& !IS_ERR(t
)) {
1668 const struct xt_table_info
*private = t
->private;
1669 struct xt_table_info info
;
1671 duprintf("t->private->number = %u\n", private->number
);
1672 ret
= compat_table_info(private, &info
);
1673 if (!ret
&& get
.size
== info
.size
) {
1674 ret
= compat_copy_entries_to_user(private->size
,
1675 t
, uptr
->entrytable
);
1677 duprintf("compat_get_entries: I've got %u not %u!\n",
1678 private->size
, get
.size
);
1681 xt_compat_flush_offsets(NFPROTO_ARP
);
1685 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1687 xt_compat_unlock(NFPROTO_ARP
);
1691 static int do_arpt_get_ctl(struct sock
*, int, void __user
*, int *);
1693 static int compat_do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1698 if (!capable(CAP_NET_ADMIN
))
1702 case ARPT_SO_GET_INFO
:
1703 ret
= get_info(sock_net(sk
), user
, len
, 1);
1705 case ARPT_SO_GET_ENTRIES
:
1706 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1709 ret
= do_arpt_get_ctl(sk
, cmd
, user
, len
);
1715 static int do_arpt_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1719 if (!capable(CAP_NET_ADMIN
))
1723 case ARPT_SO_SET_REPLACE
:
1724 ret
= do_replace(sock_net(sk
), user
, len
);
1727 case ARPT_SO_SET_ADD_COUNTERS
:
1728 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1732 duprintf("do_arpt_set_ctl: unknown request %i\n", cmd
);
1739 static int do_arpt_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1743 if (!capable(CAP_NET_ADMIN
))
1747 case ARPT_SO_GET_INFO
:
1748 ret
= get_info(sock_net(sk
), user
, len
, 0);
1751 case ARPT_SO_GET_ENTRIES
:
1752 ret
= get_entries(sock_net(sk
), user
, len
);
1755 case ARPT_SO_GET_REVISION_TARGET
: {
1756 struct xt_get_revision rev
;
1758 if (*len
!= sizeof(rev
)) {
1762 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
1767 try_then_request_module(xt_find_revision(NFPROTO_ARP
, rev
.name
,
1768 rev
.revision
, 1, &ret
),
1769 "arpt_%s", rev
.name
);
1774 duprintf("do_arpt_get_ctl: unknown request %i\n", cmd
);
1781 struct xt_table
*arpt_register_table(struct net
*net
,
1782 const struct xt_table
*table
,
1783 const struct arpt_replace
*repl
)
1786 struct xt_table_info
*newinfo
;
1787 struct xt_table_info bootstrap
1788 = { 0, 0, 0, { 0 }, { 0 }, { } };
1789 void *loc_cpu_entry
;
1790 struct xt_table
*new_table
;
1792 newinfo
= xt_alloc_table_info(repl
->size
);
1798 /* choose the copy on our node/cpu */
1799 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1800 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
1802 ret
= translate_table(table
->name
, table
->valid_hooks
,
1803 newinfo
, loc_cpu_entry
, repl
->size
,
1808 duprintf("arpt_register_table: translate table gives %d\n", ret
);
1812 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1813 if (IS_ERR(new_table
)) {
1814 ret
= PTR_ERR(new_table
);
1820 xt_free_table_info(newinfo
);
1822 return ERR_PTR(ret
);
1825 void arpt_unregister_table(struct xt_table
*table
)
1827 struct xt_table_info
*private;
1828 void *loc_cpu_entry
;
1829 struct module
*table_owner
= table
->me
;
1831 private = xt_unregister_table(table
);
1833 /* Decrease module usage counts and free resources */
1834 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1835 ARPT_ENTRY_ITERATE(loc_cpu_entry
, private->size
,
1836 cleanup_entry
, NULL
);
1837 if (private->number
> private->initial_entries
)
1838 module_put(table_owner
);
1839 xt_free_table_info(private);
1842 /* The built-in targets: standard (NULL) and error. */
1843 static struct xt_target arpt_standard_target __read_mostly
= {
1844 .name
= ARPT_STANDARD_TARGET
,
1845 .targetsize
= sizeof(int),
1846 .family
= NFPROTO_ARP
,
1847 #ifdef CONFIG_COMPAT
1848 .compatsize
= sizeof(compat_int_t
),
1849 .compat_from_user
= compat_standard_from_user
,
1850 .compat_to_user
= compat_standard_to_user
,
1854 static struct xt_target arpt_error_target __read_mostly
= {
1855 .name
= ARPT_ERROR_TARGET
,
1856 .target
= arpt_error
,
1857 .targetsize
= ARPT_FUNCTION_MAXNAMELEN
,
1858 .family
= NFPROTO_ARP
,
1861 static struct nf_sockopt_ops arpt_sockopts
= {
1863 .set_optmin
= ARPT_BASE_CTL
,
1864 .set_optmax
= ARPT_SO_SET_MAX
+1,
1865 .set
= do_arpt_set_ctl
,
1866 #ifdef CONFIG_COMPAT
1867 .compat_set
= compat_do_arpt_set_ctl
,
1869 .get_optmin
= ARPT_BASE_CTL
,
1870 .get_optmax
= ARPT_SO_GET_MAX
+1,
1871 .get
= do_arpt_get_ctl
,
1872 #ifdef CONFIG_COMPAT
1873 .compat_get
= compat_do_arpt_get_ctl
,
1875 .owner
= THIS_MODULE
,
1878 static int __net_init
arp_tables_net_init(struct net
*net
)
1880 return xt_proto_init(net
, NFPROTO_ARP
);
1883 static void __net_exit
arp_tables_net_exit(struct net
*net
)
1885 xt_proto_fini(net
, NFPROTO_ARP
);
1888 static struct pernet_operations arp_tables_net_ops
= {
1889 .init
= arp_tables_net_init
,
1890 .exit
= arp_tables_net_exit
,
1893 static int __init
arp_tables_init(void)
1897 ret
= register_pernet_subsys(&arp_tables_net_ops
);
1901 /* Noone else will be downing sem now, so we won't sleep */
1902 ret
= xt_register_target(&arpt_standard_target
);
1905 ret
= xt_register_target(&arpt_error_target
);
1909 /* Register setsockopt */
1910 ret
= nf_register_sockopt(&arpt_sockopts
);
1914 printk(KERN_INFO
"arp_tables: (C) 2002 David S. Miller\n");
1918 xt_unregister_target(&arpt_error_target
);
1920 xt_unregister_target(&arpt_standard_target
);
1922 unregister_pernet_subsys(&arp_tables_net_ops
);
1927 static void __exit
arp_tables_fini(void)
1929 nf_unregister_sockopt(&arpt_sockopts
);
1930 xt_unregister_target(&arpt_error_target
);
1931 xt_unregister_target(&arpt_standard_target
);
1932 unregister_pernet_subsys(&arp_tables_net_ops
);
/* Exported API for arptables table modules (e.g. arptable_filter). */
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);