/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
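/* Illustrative sketch (editorial, not part of the original file): a
 * minimal caller that follows the rule above -- take a reference inside
 * the lookup, then do all non-trivial work with no table lock held.
 * Assumes some table "tbl", key "pkey" and device "dev":
 *
 *	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *	if (n) {
 *		// heavy work here: only the refcount pins n, tbl->lock is free
 *		neigh_release(n);
 *	}
 */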
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
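/* Worked example (illustrative): with base = 30 * HZ, prandom_u32() % base
 * is uniform over [0, 30*HZ) and base >> 1 adds 15*HZ, so the result is
 * uniform over [15*HZ, 45*HZ) -- exactly the (1/2)*base ... (3/2)*base
 * interval described above.
 */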
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				 * We must destroy neighbour entry,
				 * but someone still uses it.
				 *
				 * The destroy will be delayed until
				 * the last user releases us, but
				 * we must kill timers etc. and move
				 * it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!atomic_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
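/* Illustrative note (not from the original file): pneigh_hash() folds the
 * last four bytes of the key, so for an IPv4 key the address word is
 * XOR-folded down by 16, 8 and 4 bits and masked with PNEIGH_HASHMASK
 * (0xF), yielding one of only 16 proxy hash buckets.
 */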
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
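/* Illustrative numbers (assuming the common defaults ucast_solicit = 3,
 * app_solicit = 0, mcast_solicit = 3): an NUD_INCOMPLETE entry gives up
 * after 3 + 0 + 3 = 6 unanswered probes, while an entry already in
 * NUD_PROBE counts MCAST_REPROBES for the multicast term instead.
 */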
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. report_unreachable is a very
	   complicated routine. In particular, it can hit the same
	   neighbour entry!

	   So we try to be accurate and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
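/* Sketch of the transitions driven above (editorial summary, not from the
 * original file): NUD_REACHABLE ages into NUD_DELAY or NUD_STALE;
 * NUD_DELAY either returns to NUD_REACHABLE on a fresh confirmation or
 * escalates to NUD_PROBE; NUD_INCOMPLETE and NUD_PROBE retransmit every
 * RETRANS_TIME until neigh_max_probes() is exceeded, at which point the
 * entry becomes NUD_FAILED and its queued packets are dropped via
 * neigh_invalidate().
 */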
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead)
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
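/* Usage sketch (illustrative): the netlink delete handler later in this
 * file tears an entry down administratively by forcing NUD_FAILED and
 * overriding any cached lladdr:
 *
 *	err = neigh_update(neigh, NULL, NUD_FAILED,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 *	neigh_release(neigh);
 */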
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < tbl->key_len)
		goto out;

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE |
			   NEIGH_UPDATE_F_ADMIN);
	neigh_release(neigh);

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < tbl->key_len)
		goto out;
	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			err = 0;
		}
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		neigh = __neigh_lookup_errno(tbl, dst, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
	}

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
	neigh_release(neigh);

out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && dev->ifindex != filter_idx)
		return true;

	return false;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tb[NDA_MAX + 1];
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	int filter_master_idx = 0, filter_idx = 0;
	unsigned int flags = NLM_F_MULTI;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
	if (!err) {
		if (tb[NDA_IFINDEX])
			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);

		if (tb[NDA_MASTER])
			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);

		if (filter_idx || filter_master_idx)
			flags |= NLM_F_DUMP_FILTERED;
	}

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (!net_eq(dev_net(n->dev), net))
				continue;
			if (neigh_ifindex_filtered(n->dev, filter_idx))
				continue;
			if (neigh_master_filtered(n->dev, filter_master_idx))
				continue;
			if (idx < s_idx)
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (pneigh_net(n) != net)
				continue;
			if (idx < s_idx)
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	s_t = cb->args[0];

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb);
		else
			err = neigh_dump_table(tbl, skb, cb);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out_kfree_skb;
		rcu_read_lock_bh();
		neigh = __neigh_lookup_noref(tbl, addr, dev);
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
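/*
 * Illustrative only: a protocol exposes its cache through these
 * helpers by calling them from its own seq_operations.  Roughly how
 * the ARP /proc iterator is wired up (simplified sketch):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with neigh_seq_next() and neigh_seq_stop() used for .next/.stop.
 */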
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
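/*
 * Illustrative only: each possible CPU contributes one row, visible
 * under /proc/net/stat/<table id>, e.g. "cat /proc/net/stat/arp_cache"
 * on an IPv4 host.  All counters are printed in hex.
 */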
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
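/*
 * Worked example (illustrative; exact sizes are architecture and
 * version dependent): with sizeof(struct ndmsg) == 12 and
 * MAX_ADDR_LEN == 32, and nla_total_size(n) == NLA_ALIGN(NLA_HDRLEN + n),
 * this comes to roughly 12 + 36 (NDA_DST) + 36 (NDA_LLADDR) +
 * 20 (cacheinfo) + 8 (NDA_PROBES) = 112 bytes per notification.
 */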
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
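/*
 * Illustrative only: the legacy "unres_qlen" sysctl is expressed in
 * packets while the kernel accounts in bytes, so values are scaled by
 * SKB_TRUESIZE(ETH_FRAME_LEN).  E.g. writing unres_qlen=3 stores
 * 3 * SKB_TRUESIZE(ETH_FRAME_LEN) bytes in QUEUE_LEN_BYTES, and a
 * read divides back down (rounding toward zero).
 */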
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
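/*
 * Illustrative only: with this handler installed, a write such as
 *
 *	sysctl -w net.ipv4.neigh.default.base_reachable_time_ms=30000
 *
 * takes effect immediately, because reachable_time is re-randomized
 * here instead of waiting for the next neigh_periodic_work() run.
 */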
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
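/*
 * Illustrative only: NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES,
 * "mcast_solicit") expands (roughly) to
 *
 *	[NEIGH_VAR_MCAST_PROBES] = {
 *		.procname	= "mcast_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_MCAST_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	}
 *
 * .data holds only the offset into neigh_parms at this point; the
 * real pointer is fixed up in neigh_sysctl_register() below.
 */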
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */

		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
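/*
 * Illustrative only: protocols call this once per table for the
 * "default" template and once per device.  Simplified from the IPv4
 * init path:
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
 *
 * which creates the /proc/sys/net/ipv4/neigh/default/ entries.
 */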
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);