// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>
#define NEIGH_DEBUG 1

#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif
/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans of and updates to the hash buckets MUST be made under
     this lock.
   - NOTHING clever should be done under this lock: no callbacks into
     protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial action, increase its
     reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock is also used to protect
   other entry fields:
    - timer
    - resolution queue

   Again, nothing clever should be done under neigh->lock; the most
   complicated operation we allow there is dev->hard_header.
   dev->hard_header is assumed to be simple and not to call back into
   the neighbour tables.
 */
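
/*
 * Illustrative sketch of the rule above (example_walk_entry is a
 * hypothetical helper, not part of this file): pin the entry under
 * tbl->lock, drop the lock, then do the heavy work.
 *
 *	static void example_walk_entry(struct neigh_table *tbl,
 *				       struct neighbour *n)
 *	{
 *		write_lock_bh(&tbl->lock);
 *		neigh_hold(n);		// entry cannot be destroyed now
 *		write_unlock_bh(&tbl->lock);
 *
 *		// safe to call into drivers or send packets here
 *
 *		neigh_release(n);	// may free if we were the last user
 *	}
 */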
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * Returns a value randomly distributed in the interval
 * (1/2)*base ... (3/2)*base.  This matches the default IPv6 behaviour
 * and is deliberately not overridable, because it is a genuinely
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
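
/*
 * Worked example: with the common BASE_REACHABLE_TIME default of 30
 * seconds, neigh_rand_reach_time() returns a value uniformly
 * distributed in [15 s, 45 s): (prandom_u32() % base) + (base >> 1).
 */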
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if the new state is permanent or if the
	 * neighbor is externally learned; otherwise the entry should be
	 * on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone is still using it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
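
/*
 * Illustrative usage sketch (hypothetical caller; next_hop is assumed
 * to be an IPv4 address in the caller's scope):
 *
 *	struct neighbour *n = __neigh_create(&arp_tbl, &next_hop, dev, true);
 *	if (!IS_ERR(n)) {
 *		// ... use the entry ...
 *		neigh_release(n);	// drop the reference want_ref took
 *	}
 */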
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
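
/*
 * Illustrative note: the fold above compresses the last 32 bits of the
 * key into 4 bits by repeated XOR-shifts, so PNEIGH_HASHMASK (0xF)
 * selects one of only 16 proxy buckets -- proxy entries are expected
 * to be few.
 */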
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
				     struct net *net, const void *pkey,
				     struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate spot: error_report is a complicated
	   routine and can even hit the same neighbour entry again,
	   so we must be careful to avoid an endless loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
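
/*
 * Illustrative summary of the transitions driven above:
 *
 *	REACHABLE --(confirmation too old)------> STALE
 *	DELAY     --(confirmed in time)---------> REACHABLE
 *	DELAY     --(no confirmation)-----------> PROBE
 *	PROBE/INCOMPLETE --(probes exhausted)---> FAILED
 *
 * STALE -> DELAY happens on transmit, in __neigh_event_send() below.
 */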
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   Caller MUST hold a reference count on the entry.
 */

static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		goto out;
	}

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
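
/*
 * Illustrative usage sketch (hypothetical caller): a protocol that just
 * learned a peer's MAC address could refresh the cache with
 *
 *	neigh_update(n, new_mac, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
 *
 * which overrides a differing cached lladdr and marks the entry valid.
 */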
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
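
/*
 * The read_seqbegin()/read_seqretry() loop above is the standard
 * lockless-reader pattern for neigh->ha_lock: if neigh_update() rewrites
 * the hardware address concurrently, the sequence count changes and the
 * header is simply rebuilt.
 */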
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
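
/*
 * Illustrative usage: protocols register their table once at init time;
 * IPv4 ARP, for example, does
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * from its own initialization code.
 */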
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
};
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	if (protocol)
		neigh->protocol = protocol;

	neigh_release(neigh);

out:
	return err;
}
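
/*
 * Illustrative note: this handler backs userspace commands such as
 * "ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0 nud
 * permanent" -- NLM_F_REPLACE keeps the OVERRIDE flags set, and
 * NUD_PERMANENT makes the new entry exempt from garbage collection.
 */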
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}

static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && (!dev || dev->ifindex != filter_idx))
		return true;

	return false;
}

struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}

static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (strict_check) {
		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}

static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

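/*
 * Worked example (illustrative): with MAX_ADDR_LEN == 32 and a 12-byte
 * struct ndmsg, the payload estimate above comes to
 *
 *	NLMSG_ALIGN(12)       =  12
 *	+ nla_total_size(32)  =  36   (NDA_DST)
 *	+ nla_total_size(32)  =  36   (NDA_LLADDR)
 *	+ nla_total_size(16)  =  20   (NDA_CACHEINFO)
 *	+ nla_total_size(4)   =   8   (NDA_PROBES)
 *	+ nla_total_size(1)   =   8   (NDA_PROTOCOL)
 *	                      = 120 bytes
 *
 * nlmsg_new() adds the netlink header on top of this.
 */
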
static int neigh_get_reply(struct net *net, struct neighbour *neigh,
			   u32 pid, u32 seq)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}

static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
			    u32 pid, u32 seq, struct neigh_table *tbl)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}

static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}

void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);

int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;

	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);

#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls);

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
#endif /* CONFIG_PROC_FS */

static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

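/*
 * Worked example (illustrative): unres_qlen is presented in packets but
 * stored in bytes.  With SKB_TRUESIZE(ETH_FRAME_LEN) on the order of
 * 2 KiB, writing unres_qlen = 100 stores roughly 100 * 2 KiB in
 * QUEUE_LEN_BYTES, and reads divide by the same factor, so the packet
 * and byte views of the queue limit stay consistent.
 */
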
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = SYSCTL_INT_MAX;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

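/*
 * Illustrative effect (assumption: standard per-device sysctl paths):
 * writing
 *
 *	echo 30000 > /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * updates BASE_REACHABLE_TIME and, through the handler above, re-rolls
 * p->reachable_time immediately instead of waiting for the next
 * neigh_periodic_work pass.
 */
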
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};

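/*
 * Illustrative expansion (not in the original file): the entry
 * NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time") above
 * expands to roughly
 *
 *	[NEIGH_VAR_GC_STALETIME] = {
 *		.procname	= "gc_stale_time",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_GC_STALETIME),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_jiffies,
 *	}
 *
 * i.e. .data initially holds an offset into struct neigh_parms that
 * neigh_sysctl_register() later rebases onto a concrete parms instance.
 */
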
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
	      p_name = "ipv4";
	      break;
	case AF_INET6:
	      p_name = "ipv6";
	      break;
	default:
	      BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);