// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect another entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock,
   the most complicated procedure, which we allow is dev->hard_header.
   It is supposed, that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
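/*
 * A minimal sketch of the rule above (illustration only, not part of this
 * file; the helper name is hypothetical): pin the entry with a reference
 * while the table lock is held, drop the lock, and only then do anything
 * non-trivial.
 */
#if 0
static void example_slow_work(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* reference count keeps n alive */
	write_unlock_bh(&tbl->lock);	/* never call out with tbl->lock held */

	/* safe here: protocol callbacks, transmissions, etc. */

	neigh_release(n);		/* may trigger destruction */
}
#endif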
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
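/*
 * Worked example: with base = 30 * HZ the expression above evaluates to
 * (prandom_u32() % (30 * HZ)) + 15 * HZ, i.e. uniform over
 * [15 * HZ, 45 * HZ) -- exactly the (1/2)*base ... (3/2)*base interval
 * described in the comment.
 */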
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
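/*
 * Illustration (the values are the usual IPv4 ARP defaults, configurable
 * via net.ipv4.neigh.default.gc_thresh*): with gc_thresh2 = 512, a forced
 * gc run only tries to shrink the table down to 512 gc-eligible entries
 * (max_clean above), and entries touched within the last 5 seconds (tref)
 * are never reclaimed.
 */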
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
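/*
 * Worked example: the three shift/XOR steps fold all 32 bits of the last
 * four key bytes into the low nibble. E.g. hash_val = 0x12345678:
 *   ^= >>16 -> 0x1234444c, ^= >>8 -> 0x12267008, ^= >>4 -> 0x13041708,
 *   & 0xF   -> bucket 8 of the PNEIGH_HASHMASK + 1 (16) proxy buckets.
 */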
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
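/*
 * Worked example of the rescheduling above: with the common default
 * BASE_REACHABLE_TIME of 30 seconds, the work item is requeued every
 * 15 seconds (BASE_REACHABLE_TIME >> 1), while reachable_time itself is
 * re-randomized at most once per 300 seconds (the tbl->last_rand check).
 */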
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		goto out;
	}

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}
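/*
 * Usage sketch (illustration only, not a call made in this file; new_lladdr
 * is a hypothetical buffer): an administrative update that installs a new
 * link-layer address and pins the entry as permanent combines the flags
 * documented above:
 */
#if 0
	err = neigh_update(neigh, new_lladdr, NUD_PERMANENT,
			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			   0 /* nlmsg_pid */);
#endif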
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	neigh_release(neigh);

out:
	return err;
}
*skb
, struct neigh_parms
*parms
)
1988 struct nlattr
*nest
;
1990 nest
= nla_nest_start_noflag(skb
, NDTA_PARMS
);
1995 nla_put_u32(skb
, NDTPA_IFINDEX
, parms
->dev
->ifindex
)) ||
1996 nla_put_u32(skb
, NDTPA_REFCNT
, refcount_read(&parms
->refcnt
)) ||
1997 nla_put_u32(skb
, NDTPA_QUEUE_LENBYTES
,
1998 NEIGH_VAR(parms
, QUEUE_LEN_BYTES
)) ||
1999 /* approximative value for deprecated QUEUE_LEN (in packets) */
2000 nla_put_u32(skb
, NDTPA_QUEUE_LEN
,
2001 NEIGH_VAR(parms
, QUEUE_LEN_BYTES
) / SKB_TRUESIZE(ETH_FRAME_LEN
)) ||
2002 nla_put_u32(skb
, NDTPA_PROXY_QLEN
, NEIGH_VAR(parms
, PROXY_QLEN
)) ||
2003 nla_put_u32(skb
, NDTPA_APP_PROBES
, NEIGH_VAR(parms
, APP_PROBES
)) ||
2004 nla_put_u32(skb
, NDTPA_UCAST_PROBES
,
2005 NEIGH_VAR(parms
, UCAST_PROBES
)) ||
2006 nla_put_u32(skb
, NDTPA_MCAST_PROBES
,
2007 NEIGH_VAR(parms
, MCAST_PROBES
)) ||
2008 nla_put_u32(skb
, NDTPA_MCAST_REPROBES
,
2009 NEIGH_VAR(parms
, MCAST_REPROBES
)) ||
2010 nla_put_msecs(skb
, NDTPA_REACHABLE_TIME
, parms
->reachable_time
,
2012 nla_put_msecs(skb
, NDTPA_BASE_REACHABLE_TIME
,
2013 NEIGH_VAR(parms
, BASE_REACHABLE_TIME
), NDTPA_PAD
) ||
2014 nla_put_msecs(skb
, NDTPA_GC_STALETIME
,
2015 NEIGH_VAR(parms
, GC_STALETIME
), NDTPA_PAD
) ||
2016 nla_put_msecs(skb
, NDTPA_DELAY_PROBE_TIME
,
2017 NEIGH_VAR(parms
, DELAY_PROBE_TIME
), NDTPA_PAD
) ||
2018 nla_put_msecs(skb
, NDTPA_RETRANS_TIME
,
2019 NEIGH_VAR(parms
, RETRANS_TIME
), NDTPA_PAD
) ||
2020 nla_put_msecs(skb
, NDTPA_ANYCAST_DELAY
,
2021 NEIGH_VAR(parms
, ANYCAST_DELAY
), NDTPA_PAD
) ||
2022 nla_put_msecs(skb
, NDTPA_PROXY_DELAY
,
2023 NEIGH_VAR(parms
, PROXY_DELAY
), NDTPA_PAD
) ||
2024 nla_put_msecs(skb
, NDTPA_LOCKTIME
,
2025 NEIGH_VAR(parms
, LOCKTIME
), NDTPA_PAD
))
2026 goto nla_put_failure
;
2027 return nla_nest_end(skb
, nest
);
2030 nla_nest_cancel(skb
, nest
);
2034 static int neightbl_fill_info(struct sk_buff
*skb
, struct neigh_table
*tbl
,
2035 u32 pid
, u32 seq
, int type
, int flags
)
2037 struct nlmsghdr
*nlh
;
2038 struct ndtmsg
*ndtmsg
;
2040 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
2044 ndtmsg
= nlmsg_data(nlh
);
2046 read_lock_bh(&tbl
->lock
);
2047 ndtmsg
->ndtm_family
= tbl
->family
;
2048 ndtmsg
->ndtm_pad1
= 0;
2049 ndtmsg
->ndtm_pad2
= 0;
2051 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) ||
2052 nla_put_msecs(skb
, NDTA_GC_INTERVAL
, tbl
->gc_interval
, NDTA_PAD
) ||
2053 nla_put_u32(skb
, NDTA_THRESH1
, tbl
->gc_thresh1
) ||
2054 nla_put_u32(skb
, NDTA_THRESH2
, tbl
->gc_thresh2
) ||
2055 nla_put_u32(skb
, NDTA_THRESH3
, tbl
->gc_thresh3
))
2056 goto nla_put_failure
;
2058 unsigned long now
= jiffies
;
2059 long flush_delta
= now
- tbl
->last_flush
;
2060 long rand_delta
= now
- tbl
->last_rand
;
2061 struct neigh_hash_table
*nht
;
2062 struct ndt_config ndc
= {
2063 .ndtc_key_len
= tbl
->key_len
,
2064 .ndtc_entry_size
= tbl
->entry_size
,
2065 .ndtc_entries
= atomic_read(&tbl
->entries
),
2066 .ndtc_last_flush
= jiffies_to_msecs(flush_delta
),
2067 .ndtc_last_rand
= jiffies_to_msecs(rand_delta
),
2068 .ndtc_proxy_qlen
= tbl
->proxy_queue
.qlen
,
2072 nht
= rcu_dereference_bh(tbl
->nht
);
2073 ndc
.ndtc_hash_rnd
= nht
->hash_rnd
[0];
2074 ndc
.ndtc_hash_mask
= ((1 << nht
->hash_shift
) - 1);
2075 rcu_read_unlock_bh();
2077 if (nla_put(skb
, NDTA_CONFIG
, sizeof(ndc
), &ndc
))
2078 goto nla_put_failure
;
2083 struct ndt_stats ndst
;
2085 memset(&ndst
, 0, sizeof(ndst
));
2087 for_each_possible_cpu(cpu
) {
2088 struct neigh_statistics
*st
;
2090 st
= per_cpu_ptr(tbl
->stats
, cpu
);
2091 ndst
.ndts_allocs
+= st
->allocs
;
2092 ndst
.ndts_destroys
+= st
->destroys
;
2093 ndst
.ndts_hash_grows
+= st
->hash_grows
;
2094 ndst
.ndts_res_failed
+= st
->res_failed
;
2095 ndst
.ndts_lookups
+= st
->lookups
;
2096 ndst
.ndts_hits
+= st
->hits
;
2097 ndst
.ndts_rcv_probes_mcast
+= st
->rcv_probes_mcast
;
2098 ndst
.ndts_rcv_probes_ucast
+= st
->rcv_probes_ucast
;
2099 ndst
.ndts_periodic_gc_runs
+= st
->periodic_gc_runs
;
2100 ndst
.ndts_forced_gc_runs
+= st
->forced_gc_runs
;
2101 ndst
.ndts_table_fulls
+= st
->table_fulls
;
2104 if (nla_put_64bit(skb
, NDTA_STATS
, sizeof(ndst
), &ndst
,
2106 goto nla_put_failure
;
2109 BUG_ON(tbl
->parms
.dev
);
2110 if (neightbl_fill_parms(skb
, &tbl
->parms
) < 0)
2111 goto nla_put_failure
;
2113 read_unlock_bh(&tbl
->lock
);
2114 nlmsg_end(skb
, nlh
);
2118 read_unlock_bh(&tbl
->lock
);
2119 nlmsg_cancel(skb
, nlh
);
2123 static int neightbl_fill_param_info(struct sk_buff
*skb
,
2124 struct neigh_table
*tbl
,
2125 struct neigh_parms
*parms
,
2126 u32 pid
, u32 seq
, int type
,
2129 struct ndtmsg
*ndtmsg
;
2130 struct nlmsghdr
*nlh
;
2132 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
2136 ndtmsg
= nlmsg_data(nlh
);
2138 read_lock_bh(&tbl
->lock
);
2139 ndtmsg
->ndtm_family
= tbl
->family
;
2140 ndtmsg
->ndtm_pad1
= 0;
2141 ndtmsg
->ndtm_pad2
= 0;
2143 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) < 0 ||
2144 neightbl_fill_parms(skb
, parms
) < 0)
2147 read_unlock_bh(&tbl
->lock
);
2148 nlmsg_end(skb
, nlh
);
2151 read_unlock_bh(&tbl
->lock
);
2152 nlmsg_cancel(skb
, nlh
);
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
*skb
, struct neighbour
*neigh
,
2430 u32 pid
, u32 seq
, int type
, unsigned int flags
)
2432 unsigned long now
= jiffies
;
2433 struct nda_cacheinfo ci
;
2434 struct nlmsghdr
*nlh
;
2437 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
2441 ndm
= nlmsg_data(nlh
);
2442 ndm
->ndm_family
= neigh
->ops
->family
;
2445 ndm
->ndm_flags
= neigh
->flags
;
2446 ndm
->ndm_type
= neigh
->type
;
2447 ndm
->ndm_ifindex
= neigh
->dev
->ifindex
;
2449 if (nla_put(skb
, NDA_DST
, neigh
->tbl
->key_len
, neigh
->primary_key
))
2450 goto nla_put_failure
;
2452 read_lock_bh(&neigh
->lock
);
2453 ndm
->ndm_state
= neigh
->nud_state
;
2454 if (neigh
->nud_state
& NUD_VALID
) {
2455 char haddr
[MAX_ADDR_LEN
];
2457 neigh_ha_snapshot(haddr
, neigh
, neigh
->dev
);
2458 if (nla_put(skb
, NDA_LLADDR
, neigh
->dev
->addr_len
, haddr
) < 0) {
2459 read_unlock_bh(&neigh
->lock
);
2460 goto nla_put_failure
;
2464 ci
.ndm_used
= jiffies_to_clock_t(now
- neigh
->used
);
2465 ci
.ndm_confirmed
= jiffies_to_clock_t(now
- neigh
->confirmed
);
2466 ci
.ndm_updated
= jiffies_to_clock_t(now
- neigh
->updated
);
2467 ci
.ndm_refcnt
= refcount_read(&neigh
->refcnt
) - 1;
2468 read_unlock_bh(&neigh
->lock
);
2470 if (nla_put_u32(skb
, NDA_PROBES
, atomic_read(&neigh
->probes
)) ||
2471 nla_put(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
))
2472 goto nla_put_failure
;
2474 if (neigh
->protocol
&& nla_put_u8(skb
, NDA_PROTOCOL
, neigh
->protocol
))
2475 goto nla_put_failure
;
2477 nlmsg_end(skb
, nlh
);
2481 nlmsg_cancel(skb
, nlh
);
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && (!dev || dev->ifindex != filter_idx))
		return true;

	return false;
}
struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
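/* Both dump walkers above use netlink_callback::args as a resume cursor:
 * args[1]/args[2] hold the hash bucket and in-bucket index for the main
 * table, args[3]/args[4] the same for the proxy table (args[0] is the
 * table index, managed by neigh_dump_info() below). A dump that fills one
 * skb therefore continues exactly where it left off on the next recvmsg().
 */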
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (strict_check) {
		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return err;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
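/* Rough worked example (illustrative, assuming the common layout:
 * sizeof(struct ndmsg) == 12, sizeof(struct nda_cacheinfo) == 16,
 * MAX_ADDR_LEN == 32, NLA_HDRLEN == 4, nla_total_size(n) == ALIGN(n + 4)):
 *
 *	12 + 36 (NDA_DST) + 36 (NDA_LLADDR) + 20 (NDA_CACHEINFO)
 *	   + 8 (NDA_PROBES) + 8 (NDA_PROTOCOL) = 120 bytes
 *
 * per neighbour message, excluding the netlink header itself. This is a
 * worst case: MAX_ADDR_LEN is used rather than the device's addr_len.
 */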
static int neigh_get_reply(struct net *net, struct neighbour *neigh,
			   u32 pid, u32 seq)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
			    u32 pid, u32 seq, struct neigh_table *tbl)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
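/* Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	static void count_reachable(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_REACHABLE)
 *			(*(int *)cookie)++;
 *	}
 *	...
 *	int nr = 0;
 *	neigh_for_each(&arp_tbl, count_reachable, &nr);
 *
 * The callback runs under tbl->lock with BHs disabled, so it must not
 * sleep or re-enter the table (see the locking rules at the top of this
 * file).
 */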
);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;

	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
#endif /* CONFIG_PROC_FS */
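/* The per-table statistics appear under /proc/net/stat/ (e.g. arp_cache,
 * ndisc_cache), one row per possible CPU, with counters printed in hex by
 * neigh_stat_seq_show() above.
 */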
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
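/* Illustrative arithmetic: with ETH_FRAME_LEN == 1514, SKB_TRUESIZE(1514)
 * is roughly 2 KiB once skb and shared-info overhead are added (the exact
 * value is config- and arch-dependent). Writing e.g. 101 to the legacy
 * "unres_qlen" knob therefore stores about 101 * 2 KiB in unres_qlen_bytes,
 * and reads convert back by the same factor, rounding down.
 */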
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void *buffer, size_t *lenp,
					   loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = SYSCTL_INT_MAX;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void *buffer, size_t *lenp,
					      loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
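/* Illustrative effect (device name assumed): writing
 *
 *	echo 30000 > /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * takes effect immediately, because reachable_time is re-randomized here
 * rather than waiting for the next neigh_periodic_work run.
 */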
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};
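/* Each entry above becomes a file under net/<proto>/neigh/<dev|default>/,
 * e.g. /proc/sys/net/ipv4/neigh/default/gc_thresh1 or
 * /proc/sys/net/ipv6/neigh/eth0/base_reachable_time_ms (device name
 * illustrative); neigh_sysctl_register() below fills in the data pointers
 * and registers the table.
 */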
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);