2 * Generic address resolution entity
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
28 #include <linux/sysctl.h>
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
46 #define neigh_dbg(level, fmt, ...) \
48 if (level <= NEIGH_DEBUG) \
49 pr_debug(fmt, ##__VA_ARGS__); \
52 #define PNEIGH_HASHMASK 0xF
54 static void neigh_timer_handler(unsigned long arg
);
55 static void __neigh_notify(struct neighbour
*n
, int type
, int flags
);
56 static void neigh_update_notify(struct neighbour
*neigh
);
57 static int pneigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
);
60 static const struct file_operations neigh_stat_seq_fops
;
64 Neighbour hash table buckets are protected with rwlock tbl->lock.
66 - All the scans/updates to hash buckets MUST be made under this lock.
67 - NOTHING clever should be made under this lock: no callbacks
68 to protocol backends, no attempts to send something to network.
69 It will result in deadlocks, if backend/driver wants to use neighbour
71 - If the entry requires some non-trivial actions, increase
72 its reference count and release table lock.
74 Neighbour entries are protected:
75 - with reference count.
76 - with rwlock neigh->lock
78 Reference count prevents destruction.
80 neigh->lock mainly serializes ll address data and its validity state.
81 However, the same lock is used to protect another entry fields:
85 Again, nothing clever shall be made under neigh->lock,
86 the most complicated procedure, which we allow is dev->hard_header.
87 It is supposed, that dev->hard_header is simplistic and does
88 not make callbacks to neighbour tables.
91 static int neigh_blackhole(struct neighbour
*neigh
, struct sk_buff
*skb
)
97 static void neigh_cleanup_and_release(struct neighbour
*neigh
)
99 if (neigh
->parms
->neigh_cleanup
)
100 neigh
->parms
->neigh_cleanup(neigh
);
102 __neigh_notify(neigh
, RTM_DELNEIGH
, 0);
103 neigh_release(neigh
);
107 * It is random distribution in the interval (1/2)*base...(3/2)*base.
108 * It corresponds to default IPv6 settings and is not overridable,
109 * because it is really reasonable choice.
112 unsigned long neigh_rand_reach_time(unsigned long base
)
114 return base
? (prandom_u32() % base
) + (base
>> 1) : 0;
116 EXPORT_SYMBOL(neigh_rand_reach_time
);
119 static int neigh_forced_gc(struct neigh_table
*tbl
)
123 struct neigh_hash_table
*nht
;
125 NEIGH_CACHE_STAT_INC(tbl
, forced_gc_runs
);
127 write_lock_bh(&tbl
->lock
);
128 nht
= rcu_dereference_protected(tbl
->nht
,
129 lockdep_is_held(&tbl
->lock
));
130 for (i
= 0; i
< (1 << nht
->hash_shift
); i
++) {
132 struct neighbour __rcu
**np
;
134 np
= &nht
->hash_buckets
[i
];
135 while ((n
= rcu_dereference_protected(*np
,
136 lockdep_is_held(&tbl
->lock
))) != NULL
) {
137 /* Neighbour record may be discarded if:
138 * - nobody refers to it.
139 * - it is not permanent
141 write_lock(&n
->lock
);
142 if (atomic_read(&n
->refcnt
) == 1 &&
143 !(n
->nud_state
& NUD_PERMANENT
)) {
144 rcu_assign_pointer(*np
,
145 rcu_dereference_protected(n
->next
,
146 lockdep_is_held(&tbl
->lock
)));
149 write_unlock(&n
->lock
);
150 neigh_cleanup_and_release(n
);
153 write_unlock(&n
->lock
);
158 tbl
->last_flush
= jiffies
;
160 write_unlock_bh(&tbl
->lock
);
165 static void neigh_add_timer(struct neighbour
*n
, unsigned long when
)
168 if (unlikely(mod_timer(&n
->timer
, when
))) {
169 printk("NEIGH: BUG, double timer add, state is %x\n",
175 static int neigh_del_timer(struct neighbour
*n
)
177 if ((n
->nud_state
& NUD_IN_TIMER
) &&
178 del_timer(&n
->timer
)) {
185 static void pneigh_queue_purge(struct sk_buff_head
*list
)
189 while ((skb
= skb_dequeue(list
)) != NULL
) {
195 static void neigh_flush_dev(struct neigh_table
*tbl
, struct net_device
*dev
)
198 struct neigh_hash_table
*nht
;
200 nht
= rcu_dereference_protected(tbl
->nht
,
201 lockdep_is_held(&tbl
->lock
));
203 for (i
= 0; i
< (1 << nht
->hash_shift
); i
++) {
205 struct neighbour __rcu
**np
= &nht
->hash_buckets
[i
];
207 while ((n
= rcu_dereference_protected(*np
,
208 lockdep_is_held(&tbl
->lock
))) != NULL
) {
209 if (dev
&& n
->dev
!= dev
) {
213 rcu_assign_pointer(*np
,
214 rcu_dereference_protected(n
->next
,
215 lockdep_is_held(&tbl
->lock
)));
216 write_lock(&n
->lock
);
220 if (atomic_read(&n
->refcnt
) != 1) {
221 /* The most unpleasant situation.
222 We must destroy neighbour entry,
223 but someone still uses it.
225 The destroy will be delayed until
226 the last user releases us, but
227 we must kill timers etc. and move
230 __skb_queue_purge(&n
->arp_queue
);
231 n
->arp_queue_len_bytes
= 0;
232 n
->output
= neigh_blackhole
;
233 if (n
->nud_state
& NUD_VALID
)
234 n
->nud_state
= NUD_NOARP
;
236 n
->nud_state
= NUD_NONE
;
237 neigh_dbg(2, "neigh %p is stray\n", n
);
239 write_unlock(&n
->lock
);
240 neigh_cleanup_and_release(n
);
245 void neigh_changeaddr(struct neigh_table
*tbl
, struct net_device
*dev
)
247 write_lock_bh(&tbl
->lock
);
248 neigh_flush_dev(tbl
, dev
);
249 write_unlock_bh(&tbl
->lock
);
251 EXPORT_SYMBOL(neigh_changeaddr
);
253 int neigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
)
255 write_lock_bh(&tbl
->lock
);
256 neigh_flush_dev(tbl
, dev
);
257 pneigh_ifdown(tbl
, dev
);
258 write_unlock_bh(&tbl
->lock
);
260 del_timer_sync(&tbl
->proxy_timer
);
261 pneigh_queue_purge(&tbl
->proxy_queue
);
264 EXPORT_SYMBOL(neigh_ifdown
);
266 static struct neighbour
*neigh_alloc(struct neigh_table
*tbl
, struct net_device
*dev
)
268 struct neighbour
*n
= NULL
;
269 unsigned long now
= jiffies
;
272 entries
= atomic_inc_return(&tbl
->entries
) - 1;
273 if (entries
>= tbl
->gc_thresh3
||
274 (entries
>= tbl
->gc_thresh2
&&
275 time_after(now
, tbl
->last_flush
+ 5 * HZ
))) {
276 if (!neigh_forced_gc(tbl
) &&
277 entries
>= tbl
->gc_thresh3
)
281 n
= kzalloc(tbl
->entry_size
+ dev
->neigh_priv_len
, GFP_ATOMIC
);
285 __skb_queue_head_init(&n
->arp_queue
);
286 rwlock_init(&n
->lock
);
287 seqlock_init(&n
->ha_lock
);
288 n
->updated
= n
->used
= now
;
289 n
->nud_state
= NUD_NONE
;
290 n
->output
= neigh_blackhole
;
291 seqlock_init(&n
->hh
.hh_lock
);
292 n
->parms
= neigh_parms_clone(&tbl
->parms
);
293 setup_timer(&n
->timer
, neigh_timer_handler
, (unsigned long)n
);
295 NEIGH_CACHE_STAT_INC(tbl
, allocs
);
297 atomic_set(&n
->refcnt
, 1);
303 atomic_dec(&tbl
->entries
);
307 static void neigh_get_hash_rnd(u32
*x
)
309 get_random_bytes(x
, sizeof(*x
));
313 static struct neigh_hash_table
*neigh_hash_alloc(unsigned int shift
)
315 size_t size
= (1 << shift
) * sizeof(struct neighbour
*);
316 struct neigh_hash_table
*ret
;
317 struct neighbour __rcu
**buckets
;
320 ret
= kmalloc(sizeof(*ret
), GFP_ATOMIC
);
323 if (size
<= PAGE_SIZE
)
324 buckets
= kzalloc(size
, GFP_ATOMIC
);
326 buckets
= (struct neighbour __rcu
**)
327 __get_free_pages(GFP_ATOMIC
| __GFP_ZERO
,
333 ret
->hash_buckets
= buckets
;
334 ret
->hash_shift
= shift
;
335 for (i
= 0; i
< NEIGH_NUM_HASH_RND
; i
++)
336 neigh_get_hash_rnd(&ret
->hash_rnd
[i
]);
340 static void neigh_hash_free_rcu(struct rcu_head
*head
)
342 struct neigh_hash_table
*nht
= container_of(head
,
343 struct neigh_hash_table
,
345 size_t size
= (1 << nht
->hash_shift
) * sizeof(struct neighbour
*);
346 struct neighbour __rcu
**buckets
= nht
->hash_buckets
;
348 if (size
<= PAGE_SIZE
)
351 free_pages((unsigned long)buckets
, get_order(size
));
355 static struct neigh_hash_table
*neigh_hash_grow(struct neigh_table
*tbl
,
356 unsigned long new_shift
)
358 unsigned int i
, hash
;
359 struct neigh_hash_table
*new_nht
, *old_nht
;
361 NEIGH_CACHE_STAT_INC(tbl
, hash_grows
);
363 old_nht
= rcu_dereference_protected(tbl
->nht
,
364 lockdep_is_held(&tbl
->lock
));
365 new_nht
= neigh_hash_alloc(new_shift
);
369 for (i
= 0; i
< (1 << old_nht
->hash_shift
); i
++) {
370 struct neighbour
*n
, *next
;
372 for (n
= rcu_dereference_protected(old_nht
->hash_buckets
[i
],
373 lockdep_is_held(&tbl
->lock
));
376 hash
= tbl
->hash(n
->primary_key
, n
->dev
,
379 hash
>>= (32 - new_nht
->hash_shift
);
380 next
= rcu_dereference_protected(n
->next
,
381 lockdep_is_held(&tbl
->lock
));
383 rcu_assign_pointer(n
->next
,
384 rcu_dereference_protected(
385 new_nht
->hash_buckets
[hash
],
386 lockdep_is_held(&tbl
->lock
)));
387 rcu_assign_pointer(new_nht
->hash_buckets
[hash
], n
);
391 rcu_assign_pointer(tbl
->nht
, new_nht
);
392 call_rcu(&old_nht
->rcu
, neigh_hash_free_rcu
);
396 struct neighbour
*neigh_lookup(struct neigh_table
*tbl
, const void *pkey
,
397 struct net_device
*dev
)
401 NEIGH_CACHE_STAT_INC(tbl
, lookups
);
404 n
= __neigh_lookup_noref(tbl
, pkey
, dev
);
406 if (!atomic_inc_not_zero(&n
->refcnt
))
408 NEIGH_CACHE_STAT_INC(tbl
, hits
);
411 rcu_read_unlock_bh();
414 EXPORT_SYMBOL(neigh_lookup
);
416 struct neighbour
*neigh_lookup_nodev(struct neigh_table
*tbl
, struct net
*net
,
420 int key_len
= tbl
->key_len
;
422 struct neigh_hash_table
*nht
;
424 NEIGH_CACHE_STAT_INC(tbl
, lookups
);
427 nht
= rcu_dereference_bh(tbl
->nht
);
428 hash_val
= tbl
->hash(pkey
, NULL
, nht
->hash_rnd
) >> (32 - nht
->hash_shift
);
430 for (n
= rcu_dereference_bh(nht
->hash_buckets
[hash_val
]);
432 n
= rcu_dereference_bh(n
->next
)) {
433 if (!memcmp(n
->primary_key
, pkey
, key_len
) &&
434 net_eq(dev_net(n
->dev
), net
)) {
435 if (!atomic_inc_not_zero(&n
->refcnt
))
437 NEIGH_CACHE_STAT_INC(tbl
, hits
);
442 rcu_read_unlock_bh();
445 EXPORT_SYMBOL(neigh_lookup_nodev
);
447 struct neighbour
*__neigh_create(struct neigh_table
*tbl
, const void *pkey
,
448 struct net_device
*dev
, bool want_ref
)
451 int key_len
= tbl
->key_len
;
453 struct neighbour
*n1
, *rc
, *n
= neigh_alloc(tbl
, dev
);
454 struct neigh_hash_table
*nht
;
457 rc
= ERR_PTR(-ENOBUFS
);
461 memcpy(n
->primary_key
, pkey
, key_len
);
465 /* Protocol specific setup. */
466 if (tbl
->constructor
&& (error
= tbl
->constructor(n
)) < 0) {
468 goto out_neigh_release
;
471 if (dev
->netdev_ops
->ndo_neigh_construct
) {
472 error
= dev
->netdev_ops
->ndo_neigh_construct(n
);
475 goto out_neigh_release
;
479 /* Device specific setup. */
480 if (n
->parms
->neigh_setup
&&
481 (error
= n
->parms
->neigh_setup(n
)) < 0) {
483 goto out_neigh_release
;
486 n
->confirmed
= jiffies
- (NEIGH_VAR(n
->parms
, BASE_REACHABLE_TIME
) << 1);
488 write_lock_bh(&tbl
->lock
);
489 nht
= rcu_dereference_protected(tbl
->nht
,
490 lockdep_is_held(&tbl
->lock
));
492 if (atomic_read(&tbl
->entries
) > (1 << nht
->hash_shift
))
493 nht
= neigh_hash_grow(tbl
, nht
->hash_shift
+ 1);
495 hash_val
= tbl
->hash(pkey
, dev
, nht
->hash_rnd
) >> (32 - nht
->hash_shift
);
497 if (n
->parms
->dead
) {
498 rc
= ERR_PTR(-EINVAL
);
502 for (n1
= rcu_dereference_protected(nht
->hash_buckets
[hash_val
],
503 lockdep_is_held(&tbl
->lock
));
505 n1
= rcu_dereference_protected(n1
->next
,
506 lockdep_is_held(&tbl
->lock
))) {
507 if (dev
== n1
->dev
&& !memcmp(n1
->primary_key
, pkey
, key_len
)) {
518 rcu_assign_pointer(n
->next
,
519 rcu_dereference_protected(nht
->hash_buckets
[hash_val
],
520 lockdep_is_held(&tbl
->lock
)));
521 rcu_assign_pointer(nht
->hash_buckets
[hash_val
], n
);
522 write_unlock_bh(&tbl
->lock
);
523 neigh_dbg(2, "neigh %p is created\n", n
);
528 write_unlock_bh(&tbl
->lock
);
533 EXPORT_SYMBOL(__neigh_create
);
535 static u32
pneigh_hash(const void *pkey
, int key_len
)
537 u32 hash_val
= *(u32
*)(pkey
+ key_len
- 4);
538 hash_val
^= (hash_val
>> 16);
539 hash_val
^= hash_val
>> 8;
540 hash_val
^= hash_val
>> 4;
541 hash_val
&= PNEIGH_HASHMASK
;
545 static struct pneigh_entry
*__pneigh_lookup_1(struct pneigh_entry
*n
,
549 struct net_device
*dev
)
552 if (!memcmp(n
->key
, pkey
, key_len
) &&
553 net_eq(pneigh_net(n
), net
) &&
554 (n
->dev
== dev
|| !n
->dev
))
561 struct pneigh_entry
*__pneigh_lookup(struct neigh_table
*tbl
,
562 struct net
*net
, const void *pkey
, struct net_device
*dev
)
564 int key_len
= tbl
->key_len
;
565 u32 hash_val
= pneigh_hash(pkey
, key_len
);
567 return __pneigh_lookup_1(tbl
->phash_buckets
[hash_val
],
568 net
, pkey
, key_len
, dev
);
570 EXPORT_SYMBOL_GPL(__pneigh_lookup
);
572 struct pneigh_entry
* pneigh_lookup(struct neigh_table
*tbl
,
573 struct net
*net
, const void *pkey
,
574 struct net_device
*dev
, int creat
)
576 struct pneigh_entry
*n
;
577 int key_len
= tbl
->key_len
;
578 u32 hash_val
= pneigh_hash(pkey
, key_len
);
580 read_lock_bh(&tbl
->lock
);
581 n
= __pneigh_lookup_1(tbl
->phash_buckets
[hash_val
],
582 net
, pkey
, key_len
, dev
);
583 read_unlock_bh(&tbl
->lock
);
590 n
= kmalloc(sizeof(*n
) + key_len
, GFP_KERNEL
);
594 write_pnet(&n
->net
, net
);
595 memcpy(n
->key
, pkey
, key_len
);
600 if (tbl
->pconstructor
&& tbl
->pconstructor(n
)) {
608 write_lock_bh(&tbl
->lock
);
609 n
->next
= tbl
->phash_buckets
[hash_val
];
610 tbl
->phash_buckets
[hash_val
] = n
;
611 write_unlock_bh(&tbl
->lock
);
615 EXPORT_SYMBOL(pneigh_lookup
);
618 int pneigh_delete(struct neigh_table
*tbl
, struct net
*net
, const void *pkey
,
619 struct net_device
*dev
)
621 struct pneigh_entry
*n
, **np
;
622 int key_len
= tbl
->key_len
;
623 u32 hash_val
= pneigh_hash(pkey
, key_len
);
625 write_lock_bh(&tbl
->lock
);
626 for (np
= &tbl
->phash_buckets
[hash_val
]; (n
= *np
) != NULL
;
628 if (!memcmp(n
->key
, pkey
, key_len
) && n
->dev
== dev
&&
629 net_eq(pneigh_net(n
), net
)) {
631 write_unlock_bh(&tbl
->lock
);
632 if (tbl
->pdestructor
)
640 write_unlock_bh(&tbl
->lock
);
644 static int pneigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
)
646 struct pneigh_entry
*n
, **np
;
649 for (h
= 0; h
<= PNEIGH_HASHMASK
; h
++) {
650 np
= &tbl
->phash_buckets
[h
];
651 while ((n
= *np
) != NULL
) {
652 if (!dev
|| n
->dev
== dev
) {
654 if (tbl
->pdestructor
)
667 static void neigh_parms_destroy(struct neigh_parms
*parms
);
669 static inline void neigh_parms_put(struct neigh_parms
*parms
)
671 if (atomic_dec_and_test(&parms
->refcnt
))
672 neigh_parms_destroy(parms
);
676 * neighbour must already be out of the table;
679 void neigh_destroy(struct neighbour
*neigh
)
681 struct net_device
*dev
= neigh
->dev
;
683 NEIGH_CACHE_STAT_INC(neigh
->tbl
, destroys
);
686 pr_warn("Destroying alive neighbour %p\n", neigh
);
691 if (neigh_del_timer(neigh
))
692 pr_warn("Impossible event\n");
694 write_lock_bh(&neigh
->lock
);
695 __skb_queue_purge(&neigh
->arp_queue
);
696 write_unlock_bh(&neigh
->lock
);
697 neigh
->arp_queue_len_bytes
= 0;
699 if (dev
->netdev_ops
->ndo_neigh_destroy
)
700 dev
->netdev_ops
->ndo_neigh_destroy(neigh
);
703 neigh_parms_put(neigh
->parms
);
705 neigh_dbg(2, "neigh %p is destroyed\n", neigh
);
707 atomic_dec(&neigh
->tbl
->entries
);
708 kfree_rcu(neigh
, rcu
);
710 EXPORT_SYMBOL(neigh_destroy
);
712 /* Neighbour state is suspicious;
715 Called with write_locked neigh.
717 static void neigh_suspect(struct neighbour
*neigh
)
719 neigh_dbg(2, "neigh %p is suspected\n", neigh
);
721 neigh
->output
= neigh
->ops
->output
;
724 /* Neighbour state is OK;
727 Called with write_locked neigh.
729 static void neigh_connect(struct neighbour
*neigh
)
731 neigh_dbg(2, "neigh %p is connected\n", neigh
);
733 neigh
->output
= neigh
->ops
->connected_output
;
736 static void neigh_periodic_work(struct work_struct
*work
)
738 struct neigh_table
*tbl
= container_of(work
, struct neigh_table
, gc_work
.work
);
740 struct neighbour __rcu
**np
;
742 struct neigh_hash_table
*nht
;
744 NEIGH_CACHE_STAT_INC(tbl
, periodic_gc_runs
);
746 write_lock_bh(&tbl
->lock
);
747 nht
= rcu_dereference_protected(tbl
->nht
,
748 lockdep_is_held(&tbl
->lock
));
751 * periodically recompute ReachableTime from random function
754 if (time_after(jiffies
, tbl
->last_rand
+ 300 * HZ
)) {
755 struct neigh_parms
*p
;
756 tbl
->last_rand
= jiffies
;
757 list_for_each_entry(p
, &tbl
->parms_list
, list
)
759 neigh_rand_reach_time(NEIGH_VAR(p
, BASE_REACHABLE_TIME
));
762 if (atomic_read(&tbl
->entries
) < tbl
->gc_thresh1
)
765 for (i
= 0 ; i
< (1 << nht
->hash_shift
); i
++) {
766 np
= &nht
->hash_buckets
[i
];
768 while ((n
= rcu_dereference_protected(*np
,
769 lockdep_is_held(&tbl
->lock
))) != NULL
) {
772 write_lock(&n
->lock
);
774 state
= n
->nud_state
;
775 if (state
& (NUD_PERMANENT
| NUD_IN_TIMER
)) {
776 write_unlock(&n
->lock
);
780 if (time_before(n
->used
, n
->confirmed
))
781 n
->used
= n
->confirmed
;
783 if (atomic_read(&n
->refcnt
) == 1 &&
784 (state
== NUD_FAILED
||
785 time_after(jiffies
, n
->used
+ NEIGH_VAR(n
->parms
, GC_STALETIME
)))) {
788 write_unlock(&n
->lock
);
789 neigh_cleanup_and_release(n
);
792 write_unlock(&n
->lock
);
798 * It's fine to release lock here, even if hash table
799 * grows while we are preempted.
801 write_unlock_bh(&tbl
->lock
);
803 write_lock_bh(&tbl
->lock
);
804 nht
= rcu_dereference_protected(tbl
->nht
,
805 lockdep_is_held(&tbl
->lock
));
808 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
809 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
810 * BASE_REACHABLE_TIME.
812 queue_delayed_work(system_power_efficient_wq
, &tbl
->gc_work
,
813 NEIGH_VAR(&tbl
->parms
, BASE_REACHABLE_TIME
) >> 1);
814 write_unlock_bh(&tbl
->lock
);
817 static __inline__
int neigh_max_probes(struct neighbour
*n
)
819 struct neigh_parms
*p
= n
->parms
;
820 return NEIGH_VAR(p
, UCAST_PROBES
) + NEIGH_VAR(p
, APP_PROBES
) +
821 (n
->nud_state
& NUD_PROBE
? NEIGH_VAR(p
, MCAST_REPROBES
) :
822 NEIGH_VAR(p
, MCAST_PROBES
));
825 static void neigh_invalidate(struct neighbour
*neigh
)
826 __releases(neigh
->lock
)
827 __acquires(neigh
->lock
)
831 NEIGH_CACHE_STAT_INC(neigh
->tbl
, res_failed
);
832 neigh_dbg(2, "neigh %p is failed\n", neigh
);
833 neigh
->updated
= jiffies
;
835 /* It is very thin place. report_unreachable is very complicated
836 routine. Particularly, it can hit the same neighbour entry!
838 So that, we try to be accurate and avoid dead loop. --ANK
840 while (neigh
->nud_state
== NUD_FAILED
&&
841 (skb
= __skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
842 write_unlock(&neigh
->lock
);
843 neigh
->ops
->error_report(neigh
, skb
);
844 write_lock(&neigh
->lock
);
846 __skb_queue_purge(&neigh
->arp_queue
);
847 neigh
->arp_queue_len_bytes
= 0;
850 static void neigh_probe(struct neighbour
*neigh
)
851 __releases(neigh
->lock
)
853 struct sk_buff
*skb
= skb_peek_tail(&neigh
->arp_queue
);
854 /* keep skb alive even if arp_queue overflows */
856 skb
= skb_copy(skb
, GFP_ATOMIC
);
857 write_unlock(&neigh
->lock
);
858 neigh
->ops
->solicit(neigh
, skb
);
859 atomic_inc(&neigh
->probes
);
863 /* Called when a timer expires for a neighbour entry. */
865 static void neigh_timer_handler(unsigned long arg
)
867 unsigned long now
, next
;
868 struct neighbour
*neigh
= (struct neighbour
*)arg
;
872 write_lock(&neigh
->lock
);
874 state
= neigh
->nud_state
;
878 if (!(state
& NUD_IN_TIMER
))
881 if (state
& NUD_REACHABLE
) {
882 if (time_before_eq(now
,
883 neigh
->confirmed
+ neigh
->parms
->reachable_time
)) {
884 neigh_dbg(2, "neigh %p is still alive\n", neigh
);
885 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
886 } else if (time_before_eq(now
,
888 NEIGH_VAR(neigh
->parms
, DELAY_PROBE_TIME
))) {
889 neigh_dbg(2, "neigh %p is delayed\n", neigh
);
890 neigh
->nud_state
= NUD_DELAY
;
891 neigh
->updated
= jiffies
;
892 neigh_suspect(neigh
);
893 next
= now
+ NEIGH_VAR(neigh
->parms
, DELAY_PROBE_TIME
);
895 neigh_dbg(2, "neigh %p is suspected\n", neigh
);
896 neigh
->nud_state
= NUD_STALE
;
897 neigh
->updated
= jiffies
;
898 neigh_suspect(neigh
);
901 } else if (state
& NUD_DELAY
) {
902 if (time_before_eq(now
,
904 NEIGH_VAR(neigh
->parms
, DELAY_PROBE_TIME
))) {
905 neigh_dbg(2, "neigh %p is now reachable\n", neigh
);
906 neigh
->nud_state
= NUD_REACHABLE
;
907 neigh
->updated
= jiffies
;
908 neigh_connect(neigh
);
910 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
912 neigh_dbg(2, "neigh %p is probed\n", neigh
);
913 neigh
->nud_state
= NUD_PROBE
;
914 neigh
->updated
= jiffies
;
915 atomic_set(&neigh
->probes
, 0);
917 next
= now
+ NEIGH_VAR(neigh
->parms
, RETRANS_TIME
);
920 /* NUD_PROBE|NUD_INCOMPLETE */
921 next
= now
+ NEIGH_VAR(neigh
->parms
, RETRANS_TIME
);
924 if ((neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) &&
925 atomic_read(&neigh
->probes
) >= neigh_max_probes(neigh
)) {
926 neigh
->nud_state
= NUD_FAILED
;
928 neigh_invalidate(neigh
);
932 if (neigh
->nud_state
& NUD_IN_TIMER
) {
933 if (time_before(next
, jiffies
+ HZ
/2))
934 next
= jiffies
+ HZ
/2;
935 if (!mod_timer(&neigh
->timer
, next
))
938 if (neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) {
942 write_unlock(&neigh
->lock
);
946 neigh_update_notify(neigh
);
948 neigh_release(neigh
);
951 int __neigh_event_send(struct neighbour
*neigh
, struct sk_buff
*skb
)
954 bool immediate_probe
= false;
956 write_lock_bh(&neigh
->lock
);
959 if (neigh
->nud_state
& (NUD_CONNECTED
| NUD_DELAY
| NUD_PROBE
))
964 if (!(neigh
->nud_state
& (NUD_STALE
| NUD_INCOMPLETE
))) {
965 if (NEIGH_VAR(neigh
->parms
, MCAST_PROBES
) +
966 NEIGH_VAR(neigh
->parms
, APP_PROBES
)) {
967 unsigned long next
, now
= jiffies
;
969 atomic_set(&neigh
->probes
,
970 NEIGH_VAR(neigh
->parms
, UCAST_PROBES
));
971 neigh
->nud_state
= NUD_INCOMPLETE
;
972 neigh
->updated
= now
;
973 next
= now
+ max(NEIGH_VAR(neigh
->parms
, RETRANS_TIME
),
975 neigh_add_timer(neigh
, next
);
976 immediate_probe
= true;
978 neigh
->nud_state
= NUD_FAILED
;
979 neigh
->updated
= jiffies
;
980 write_unlock_bh(&neigh
->lock
);
985 } else if (neigh
->nud_state
& NUD_STALE
) {
986 neigh_dbg(2, "neigh %p is delayed\n", neigh
);
987 neigh
->nud_state
= NUD_DELAY
;
988 neigh
->updated
= jiffies
;
989 neigh_add_timer(neigh
, jiffies
+
990 NEIGH_VAR(neigh
->parms
, DELAY_PROBE_TIME
));
993 if (neigh
->nud_state
== NUD_INCOMPLETE
) {
995 while (neigh
->arp_queue_len_bytes
+ skb
->truesize
>
996 NEIGH_VAR(neigh
->parms
, QUEUE_LEN_BYTES
)) {
997 struct sk_buff
*buff
;
999 buff
= __skb_dequeue(&neigh
->arp_queue
);
1002 neigh
->arp_queue_len_bytes
-= buff
->truesize
;
1004 NEIGH_CACHE_STAT_INC(neigh
->tbl
, unres_discards
);
1007 __skb_queue_tail(&neigh
->arp_queue
, skb
);
1008 neigh
->arp_queue_len_bytes
+= skb
->truesize
;
1013 if (immediate_probe
)
1016 write_unlock(&neigh
->lock
);
1021 if (neigh
->nud_state
& NUD_STALE
)
1023 write_unlock_bh(&neigh
->lock
);
1027 EXPORT_SYMBOL(__neigh_event_send
);
1029 static void neigh_update_hhs(struct neighbour
*neigh
)
1031 struct hh_cache
*hh
;
1032 void (*update
)(struct hh_cache
*, const struct net_device
*, const unsigned char *)
1035 if (neigh
->dev
->header_ops
)
1036 update
= neigh
->dev
->header_ops
->cache_update
;
1041 write_seqlock_bh(&hh
->hh_lock
);
1042 update(hh
, neigh
->dev
, neigh
->ha
);
1043 write_sequnlock_bh(&hh
->hh_lock
);
1050 /* Generic update routine.
1051 -- lladdr is new lladdr or NULL, if it is not supplied.
1052 -- new is new state.
1054 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
1056 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
1057 lladdr instead of overriding it
1059 It also allows to retain current state
1060 if lladdr is unchanged.
1061 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1063 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
1065 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
1068 Caller MUST hold reference count on the entry.
1071 int neigh_update(struct neighbour
*neigh
, const u8
*lladdr
, u8
new,
1077 struct net_device
*dev
;
1078 int update_isrouter
= 0;
1080 write_lock_bh(&neigh
->lock
);
1083 old
= neigh
->nud_state
;
1086 if (!(flags
& NEIGH_UPDATE_F_ADMIN
) &&
1087 (old
& (NUD_NOARP
| NUD_PERMANENT
)))
1092 if (!(new & NUD_VALID
)) {
1093 neigh_del_timer(neigh
);
1094 if (old
& NUD_CONNECTED
)
1095 neigh_suspect(neigh
);
1096 neigh
->nud_state
= new;
1098 notify
= old
& NUD_VALID
;
1099 if ((old
& (NUD_INCOMPLETE
| NUD_PROBE
)) &&
1100 (new & NUD_FAILED
)) {
1101 neigh_invalidate(neigh
);
1107 /* Compare new lladdr with cached one */
1108 if (!dev
->addr_len
) {
1109 /* First case: device needs no address. */
1111 } else if (lladdr
) {
1112 /* The second case: if something is already cached
1113 and a new address is proposed:
1115 - if they are different, check override flag
1117 if ((old
& NUD_VALID
) &&
1118 !memcmp(lladdr
, neigh
->ha
, dev
->addr_len
))
1121 /* No address is supplied; if we know something,
1122 use it, otherwise discard the request.
1125 if (!(old
& NUD_VALID
))
1130 if (new & NUD_CONNECTED
)
1131 neigh
->confirmed
= jiffies
;
1132 neigh
->updated
= jiffies
;
1134 /* If entry was valid and address is not changed,
1135 do not change entry state, if new one is STALE.
1138 update_isrouter
= flags
& NEIGH_UPDATE_F_OVERRIDE_ISROUTER
;
1139 if (old
& NUD_VALID
) {
1140 if (lladdr
!= neigh
->ha
&& !(flags
& NEIGH_UPDATE_F_OVERRIDE
)) {
1141 update_isrouter
= 0;
1142 if ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) &&
1143 (old
& NUD_CONNECTED
)) {
1149 if (lladdr
== neigh
->ha
&& new == NUD_STALE
&&
1150 ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) ||
1151 (old
& NUD_CONNECTED
))
1158 neigh_del_timer(neigh
);
1159 if (new & NUD_PROBE
)
1160 atomic_set(&neigh
->probes
, 0);
1161 if (new & NUD_IN_TIMER
)
1162 neigh_add_timer(neigh
, (jiffies
+
1163 ((new & NUD_REACHABLE
) ?
1164 neigh
->parms
->reachable_time
:
1166 neigh
->nud_state
= new;
1170 if (lladdr
!= neigh
->ha
) {
1171 write_seqlock(&neigh
->ha_lock
);
1172 memcpy(&neigh
->ha
, lladdr
, dev
->addr_len
);
1173 write_sequnlock(&neigh
->ha_lock
);
1174 neigh_update_hhs(neigh
);
1175 if (!(new & NUD_CONNECTED
))
1176 neigh
->confirmed
= jiffies
-
1177 (NEIGH_VAR(neigh
->parms
, BASE_REACHABLE_TIME
) << 1);
1182 if (new & NUD_CONNECTED
)
1183 neigh_connect(neigh
);
1185 neigh_suspect(neigh
);
1186 if (!(old
& NUD_VALID
)) {
1187 struct sk_buff
*skb
;
1189 /* Again: avoid dead loop if something went wrong */
1191 while (neigh
->nud_state
& NUD_VALID
&&
1192 (skb
= __skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
1193 struct dst_entry
*dst
= skb_dst(skb
);
1194 struct neighbour
*n2
, *n1
= neigh
;
1195 write_unlock_bh(&neigh
->lock
);
1199 /* Why not just use 'neigh' as-is? The problem is that
1200 * things such as shaper, eql, and sch_teql can end up
1201 * using alternative, different, neigh objects to output
1202 * the packet in the output path. So what we need to do
1203 * here is re-lookup the top-level neigh in the path so
1204 * we can reinject the packet there.
1208 n2
= dst_neigh_lookup_skb(dst
, skb
);
1212 n1
->output(n1
, skb
);
1217 write_lock_bh(&neigh
->lock
);
1219 __skb_queue_purge(&neigh
->arp_queue
);
1220 neigh
->arp_queue_len_bytes
= 0;
1223 if (update_isrouter
) {
1224 neigh
->flags
= (flags
& NEIGH_UPDATE_F_ISROUTER
) ?
1225 (neigh
->flags
| NTF_ROUTER
) :
1226 (neigh
->flags
& ~NTF_ROUTER
);
1228 write_unlock_bh(&neigh
->lock
);
1231 neigh_update_notify(neigh
);
1235 EXPORT_SYMBOL(neigh_update
);
1237 /* Update the neigh to listen temporarily for probe responses, even if it is
1238 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1240 void __neigh_set_probe_once(struct neighbour
*neigh
)
1244 neigh
->updated
= jiffies
;
1245 if (!(neigh
->nud_state
& NUD_FAILED
))
1247 neigh
->nud_state
= NUD_INCOMPLETE
;
1248 atomic_set(&neigh
->probes
, neigh_max_probes(neigh
));
1249 neigh_add_timer(neigh
,
1250 jiffies
+ NEIGH_VAR(neigh
->parms
, RETRANS_TIME
));
1252 EXPORT_SYMBOL(__neigh_set_probe_once
);
1254 struct neighbour
*neigh_event_ns(struct neigh_table
*tbl
,
1255 u8
*lladdr
, void *saddr
,
1256 struct net_device
*dev
)
1258 struct neighbour
*neigh
= __neigh_lookup(tbl
, saddr
, dev
,
1259 lladdr
|| !dev
->addr_len
);
1261 neigh_update(neigh
, lladdr
, NUD_STALE
,
1262 NEIGH_UPDATE_F_OVERRIDE
);
1265 EXPORT_SYMBOL(neigh_event_ns
);
1267 /* called with read_lock_bh(&n->lock); */
1268 static void neigh_hh_init(struct neighbour
*n
)
1270 struct net_device
*dev
= n
->dev
;
1271 __be16 prot
= n
->tbl
->protocol
;
1272 struct hh_cache
*hh
= &n
->hh
;
1274 write_lock_bh(&n
->lock
);
1276 /* Only one thread can come in here and initialize the
1280 dev
->header_ops
->cache(n
, hh
, prot
);
1282 write_unlock_bh(&n
->lock
);
1285 /* Slow and careful. */
1287 int neigh_resolve_output(struct neighbour
*neigh
, struct sk_buff
*skb
)
1291 if (!neigh_event_send(neigh
, skb
)) {
1293 struct net_device
*dev
= neigh
->dev
;
1296 if (dev
->header_ops
->cache
&& !neigh
->hh
.hh_len
)
1297 neigh_hh_init(neigh
);
1300 __skb_pull(skb
, skb_network_offset(skb
));
1301 seq
= read_seqbegin(&neigh
->ha_lock
);
1302 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1303 neigh
->ha
, NULL
, skb
->len
);
1304 } while (read_seqretry(&neigh
->ha_lock
, seq
));
1307 rc
= dev_queue_xmit(skb
);
1318 EXPORT_SYMBOL(neigh_resolve_output
);
1320 /* As fast as possible without hh cache */
1322 int neigh_connected_output(struct neighbour
*neigh
, struct sk_buff
*skb
)
1324 struct net_device
*dev
= neigh
->dev
;
1329 __skb_pull(skb
, skb_network_offset(skb
));
1330 seq
= read_seqbegin(&neigh
->ha_lock
);
1331 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1332 neigh
->ha
, NULL
, skb
->len
);
1333 } while (read_seqretry(&neigh
->ha_lock
, seq
));
1336 err
= dev_queue_xmit(skb
);
1343 EXPORT_SYMBOL(neigh_connected_output
);
1345 int neigh_direct_output(struct neighbour
*neigh
, struct sk_buff
*skb
)
1347 return dev_queue_xmit(skb
);
1349 EXPORT_SYMBOL(neigh_direct_output
);
1351 static void neigh_proxy_process(unsigned long arg
)
1353 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
1354 long sched_next
= 0;
1355 unsigned long now
= jiffies
;
1356 struct sk_buff
*skb
, *n
;
1358 spin_lock(&tbl
->proxy_queue
.lock
);
1360 skb_queue_walk_safe(&tbl
->proxy_queue
, skb
, n
) {
1361 long tdif
= NEIGH_CB(skb
)->sched_next
- now
;
1364 struct net_device
*dev
= skb
->dev
;
1366 __skb_unlink(skb
, &tbl
->proxy_queue
);
1367 if (tbl
->proxy_redo
&& netif_running(dev
)) {
1369 tbl
->proxy_redo(skb
);
1376 } else if (!sched_next
|| tdif
< sched_next
)
1379 del_timer(&tbl
->proxy_timer
);
1381 mod_timer(&tbl
->proxy_timer
, jiffies
+ sched_next
);
1382 spin_unlock(&tbl
->proxy_queue
.lock
);
1385 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
1386 struct sk_buff
*skb
)
1388 unsigned long now
= jiffies
;
1390 unsigned long sched_next
= now
+ (prandom_u32() %
1391 NEIGH_VAR(p
, PROXY_DELAY
));
1393 if (tbl
->proxy_queue
.qlen
> NEIGH_VAR(p
, PROXY_QLEN
)) {
1398 NEIGH_CB(skb
)->sched_next
= sched_next
;
1399 NEIGH_CB(skb
)->flags
|= LOCALLY_ENQUEUED
;
1401 spin_lock(&tbl
->proxy_queue
.lock
);
1402 if (del_timer(&tbl
->proxy_timer
)) {
1403 if (time_before(tbl
->proxy_timer
.expires
, sched_next
))
1404 sched_next
= tbl
->proxy_timer
.expires
;
1408 __skb_queue_tail(&tbl
->proxy_queue
, skb
);
1409 mod_timer(&tbl
->proxy_timer
, sched_next
);
1410 spin_unlock(&tbl
->proxy_queue
.lock
);
1412 EXPORT_SYMBOL(pneigh_enqueue
);
1414 static inline struct neigh_parms
*lookup_neigh_parms(struct neigh_table
*tbl
,
1415 struct net
*net
, int ifindex
)
1417 struct neigh_parms
*p
;
1419 list_for_each_entry(p
, &tbl
->parms_list
, list
) {
1420 if ((p
->dev
&& p
->dev
->ifindex
== ifindex
&& net_eq(neigh_parms_net(p
), net
)) ||
1421 (!p
->dev
&& !ifindex
&& net_eq(net
, &init_net
)))
1428 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
,
1429 struct neigh_table
*tbl
)
1431 struct neigh_parms
*p
;
1432 struct net
*net
= dev_net(dev
);
1433 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1435 p
= kmemdup(&tbl
->parms
, sizeof(*p
), GFP_KERNEL
);
1438 atomic_set(&p
->refcnt
, 1);
1440 neigh_rand_reach_time(NEIGH_VAR(p
, BASE_REACHABLE_TIME
));
1443 write_pnet(&p
->net
, net
);
1444 p
->sysctl_table
= NULL
;
1446 if (ops
->ndo_neigh_setup
&& ops
->ndo_neigh_setup(dev
, p
)) {
1452 write_lock_bh(&tbl
->lock
);
1453 list_add(&p
->list
, &tbl
->parms
.list
);
1454 write_unlock_bh(&tbl
->lock
);
1456 neigh_parms_data_state_cleanall(p
);
1460 EXPORT_SYMBOL(neigh_parms_alloc
);
1462 static void neigh_rcu_free_parms(struct rcu_head
*head
)
1464 struct neigh_parms
*parms
=
1465 container_of(head
, struct neigh_parms
, rcu_head
);
1467 neigh_parms_put(parms
);
1470 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
)
1472 if (!parms
|| parms
== &tbl
->parms
)
1474 write_lock_bh(&tbl
->lock
);
1475 list_del(&parms
->list
);
1477 write_unlock_bh(&tbl
->lock
);
1479 dev_put(parms
->dev
);
1480 call_rcu(&parms
->rcu_head
, neigh_rcu_free_parms
);
1482 EXPORT_SYMBOL(neigh_parms_release
);
1484 static void neigh_parms_destroy(struct neigh_parms
*parms
)
1489 static struct lock_class_key neigh_table_proxy_queue_class
;
1491 static struct neigh_table
*neigh_tables
[NEIGH_NR_TABLES
] __read_mostly
;
1493 void neigh_table_init(int index
, struct neigh_table
*tbl
)
1495 unsigned long now
= jiffies
;
1496 unsigned long phsize
;
1498 INIT_LIST_HEAD(&tbl
->parms_list
);
1499 list_add(&tbl
->parms
.list
, &tbl
->parms_list
);
1500 write_pnet(&tbl
->parms
.net
, &init_net
);
1501 atomic_set(&tbl
->parms
.refcnt
, 1);
1502 tbl
->parms
.reachable_time
=
1503 neigh_rand_reach_time(NEIGH_VAR(&tbl
->parms
, BASE_REACHABLE_TIME
));
1505 tbl
->stats
= alloc_percpu(struct neigh_statistics
);
1507 panic("cannot create neighbour cache statistics");
1509 #ifdef CONFIG_PROC_FS
1510 if (!proc_create_data(tbl
->id
, 0, init_net
.proc_net_stat
,
1511 &neigh_stat_seq_fops
, tbl
))
1512 panic("cannot create neighbour proc dir entry");
1515 RCU_INIT_POINTER(tbl
->nht
, neigh_hash_alloc(3));
1517 phsize
= (PNEIGH_HASHMASK
+ 1) * sizeof(struct pneigh_entry
*);
1518 tbl
->phash_buckets
= kzalloc(phsize
, GFP_KERNEL
);
1520 if (!tbl
->nht
|| !tbl
->phash_buckets
)
1521 panic("cannot allocate neighbour cache hashes");
1523 if (!tbl
->entry_size
)
1524 tbl
->entry_size
= ALIGN(offsetof(struct neighbour
, primary_key
) +
1525 tbl
->key_len
, NEIGH_PRIV_ALIGN
);
1527 WARN_ON(tbl
->entry_size
% NEIGH_PRIV_ALIGN
);
1529 rwlock_init(&tbl
->lock
);
1530 INIT_DEFERRABLE_WORK(&tbl
->gc_work
, neigh_periodic_work
);
1531 queue_delayed_work(system_power_efficient_wq
, &tbl
->gc_work
,
1532 tbl
->parms
.reachable_time
);
1533 setup_timer(&tbl
->proxy_timer
, neigh_proxy_process
, (unsigned long)tbl
);
1534 skb_queue_head_init_class(&tbl
->proxy_queue
,
1535 &neigh_table_proxy_queue_class
);
1537 tbl
->last_flush
= now
;
1538 tbl
->last_rand
= now
+ tbl
->parms
.reachable_time
* 20;
1540 neigh_tables
[index
] = tbl
;
1542 EXPORT_SYMBOL(neigh_table_init
);
1544 int neigh_table_clear(int index
, struct neigh_table
*tbl
)
1546 neigh_tables
[index
] = NULL
;
1547 /* It is not clean... Fix it to unload IPv6 module safely */
1548 cancel_delayed_work_sync(&tbl
->gc_work
);
1549 del_timer_sync(&tbl
->proxy_timer
);
1550 pneigh_queue_purge(&tbl
->proxy_queue
);
1551 neigh_ifdown(tbl
, NULL
);
1552 if (atomic_read(&tbl
->entries
))
1553 pr_crit("neighbour leakage\n");
1555 call_rcu(&rcu_dereference_protected(tbl
->nht
, 1)->rcu
,
1556 neigh_hash_free_rcu
);
1559 kfree(tbl
->phash_buckets
);
1560 tbl
->phash_buckets
= NULL
;
1562 remove_proc_entry(tbl
->id
, init_net
.proc_net_stat
);
1564 free_percpu(tbl
->stats
);
1569 EXPORT_SYMBOL(neigh_table_clear
);
1571 static struct neigh_table
*neigh_find_table(int family
)
1573 struct neigh_table
*tbl
= NULL
;
1577 tbl
= neigh_tables
[NEIGH_ARP_TABLE
];
1580 tbl
= neigh_tables
[NEIGH_ND_TABLE
];
1583 tbl
= neigh_tables
[NEIGH_DN_TABLE
];
1590 static int neigh_delete(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
1592 struct net
*net
= sock_net(skb
->sk
);
1594 struct nlattr
*dst_attr
;
1595 struct neigh_table
*tbl
;
1596 struct neighbour
*neigh
;
1597 struct net_device
*dev
= NULL
;
1601 if (nlmsg_len(nlh
) < sizeof(*ndm
))
1604 dst_attr
= nlmsg_find_attr(nlh
, sizeof(*ndm
), NDA_DST
);
1605 if (dst_attr
== NULL
)
1608 ndm
= nlmsg_data(nlh
);
1609 if (ndm
->ndm_ifindex
) {
1610 dev
= __dev_get_by_index(net
, ndm
->ndm_ifindex
);
1617 tbl
= neigh_find_table(ndm
->ndm_family
);
1619 return -EAFNOSUPPORT
;
1621 if (nla_len(dst_attr
) < tbl
->key_len
)
1624 if (ndm
->ndm_flags
& NTF_PROXY
) {
1625 err
= pneigh_delete(tbl
, net
, nla_data(dst_attr
), dev
);
1632 neigh
= neigh_lookup(tbl
, nla_data(dst_attr
), dev
);
1633 if (neigh
== NULL
) {
1638 err
= neigh_update(neigh
, NULL
, NUD_FAILED
,
1639 NEIGH_UPDATE_F_OVERRIDE
|
1640 NEIGH_UPDATE_F_ADMIN
);
1641 neigh_release(neigh
);
1647 static int neigh_add(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
1649 int flags
= NEIGH_UPDATE_F_ADMIN
| NEIGH_UPDATE_F_OVERRIDE
;
1650 struct net
*net
= sock_net(skb
->sk
);
1652 struct nlattr
*tb
[NDA_MAX
+1];
1653 struct neigh_table
*tbl
;
1654 struct net_device
*dev
= NULL
;
1655 struct neighbour
*neigh
;
1660 err
= nlmsg_parse(nlh
, sizeof(*ndm
), tb
, NDA_MAX
, NULL
);
1665 if (tb
[NDA_DST
] == NULL
)
1668 ndm
= nlmsg_data(nlh
);
1669 if (ndm
->ndm_ifindex
) {
1670 dev
= __dev_get_by_index(net
, ndm
->ndm_ifindex
);
1676 if (tb
[NDA_LLADDR
] && nla_len(tb
[NDA_LLADDR
]) < dev
->addr_len
)
1680 tbl
= neigh_find_table(ndm
->ndm_family
);
1682 return -EAFNOSUPPORT
;
1684 if (nla_len(tb
[NDA_DST
]) < tbl
->key_len
)
1686 dst
= nla_data(tb
[NDA_DST
]);
1687 lladdr
= tb
[NDA_LLADDR
] ? nla_data(tb
[NDA_LLADDR
]) : NULL
;
1689 if (ndm
->ndm_flags
& NTF_PROXY
) {
1690 struct pneigh_entry
*pn
;
1693 pn
= pneigh_lookup(tbl
, net
, dst
, dev
, 1);
1695 pn
->flags
= ndm
->ndm_flags
;
1704 neigh
= neigh_lookup(tbl
, dst
, dev
);
1705 if (neigh
== NULL
) {
1706 if (!(nlh
->nlmsg_flags
& NLM_F_CREATE
)) {
1711 neigh
= __neigh_lookup_errno(tbl
, dst
, dev
);
1712 if (IS_ERR(neigh
)) {
1713 err
= PTR_ERR(neigh
);
1717 if (nlh
->nlmsg_flags
& NLM_F_EXCL
) {
1719 neigh_release(neigh
);
1723 if (!(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
1724 flags
&= ~NEIGH_UPDATE_F_OVERRIDE
;
1727 if (ndm
->ndm_flags
& NTF_USE
) {
1728 neigh_event_send(neigh
, NULL
);
1731 err
= neigh_update(neigh
, lladdr
, ndm
->ndm_state
, flags
);
1732 neigh_release(neigh
);
1738 static int neightbl_fill_parms(struct sk_buff
*skb
, struct neigh_parms
*parms
)
1740 struct nlattr
*nest
;
1742 nest
= nla_nest_start(skb
, NDTA_PARMS
);
1747 nla_put_u32(skb
, NDTPA_IFINDEX
, parms
->dev
->ifindex
)) ||
1748 nla_put_u32(skb
, NDTPA_REFCNT
, atomic_read(&parms
->refcnt
)) ||
1749 nla_put_u32(skb
, NDTPA_QUEUE_LENBYTES
,
1750 NEIGH_VAR(parms
, QUEUE_LEN_BYTES
)) ||
1751 /* approximative value for deprecated QUEUE_LEN (in packets) */
1752 nla_put_u32(skb
, NDTPA_QUEUE_LEN
,
1753 NEIGH_VAR(parms
, QUEUE_LEN_BYTES
) / SKB_TRUESIZE(ETH_FRAME_LEN
)) ||
1754 nla_put_u32(skb
, NDTPA_PROXY_QLEN
, NEIGH_VAR(parms
, PROXY_QLEN
)) ||
1755 nla_put_u32(skb
, NDTPA_APP_PROBES
, NEIGH_VAR(parms
, APP_PROBES
)) ||
1756 nla_put_u32(skb
, NDTPA_UCAST_PROBES
,
1757 NEIGH_VAR(parms
, UCAST_PROBES
)) ||
1758 nla_put_u32(skb
, NDTPA_MCAST_PROBES
,
1759 NEIGH_VAR(parms
, MCAST_PROBES
)) ||
1760 nla_put_u32(skb
, NDTPA_MCAST_REPROBES
,
1761 NEIGH_VAR(parms
, MCAST_REPROBES
)) ||
1762 nla_put_msecs(skb
, NDTPA_REACHABLE_TIME
, parms
->reachable_time
) ||
1763 nla_put_msecs(skb
, NDTPA_BASE_REACHABLE_TIME
,
1764 NEIGH_VAR(parms
, BASE_REACHABLE_TIME
)) ||
1765 nla_put_msecs(skb
, NDTPA_GC_STALETIME
,
1766 NEIGH_VAR(parms
, GC_STALETIME
)) ||
1767 nla_put_msecs(skb
, NDTPA_DELAY_PROBE_TIME
,
1768 NEIGH_VAR(parms
, DELAY_PROBE_TIME
)) ||
1769 nla_put_msecs(skb
, NDTPA_RETRANS_TIME
,
1770 NEIGH_VAR(parms
, RETRANS_TIME
)) ||
1771 nla_put_msecs(skb
, NDTPA_ANYCAST_DELAY
,
1772 NEIGH_VAR(parms
, ANYCAST_DELAY
)) ||
1773 nla_put_msecs(skb
, NDTPA_PROXY_DELAY
,
1774 NEIGH_VAR(parms
, PROXY_DELAY
)) ||
1775 nla_put_msecs(skb
, NDTPA_LOCKTIME
,
1776 NEIGH_VAR(parms
, LOCKTIME
)))
1777 goto nla_put_failure
;
1778 return nla_nest_end(skb
, nest
);
1781 nla_nest_cancel(skb
, nest
);
1785 static int neightbl_fill_info(struct sk_buff
*skb
, struct neigh_table
*tbl
,
1786 u32 pid
, u32 seq
, int type
, int flags
)
1788 struct nlmsghdr
*nlh
;
1789 struct ndtmsg
*ndtmsg
;
1791 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1795 ndtmsg
= nlmsg_data(nlh
);
1797 read_lock_bh(&tbl
->lock
);
1798 ndtmsg
->ndtm_family
= tbl
->family
;
1799 ndtmsg
->ndtm_pad1
= 0;
1800 ndtmsg
->ndtm_pad2
= 0;
1802 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) ||
1803 nla_put_msecs(skb
, NDTA_GC_INTERVAL
, tbl
->gc_interval
) ||
1804 nla_put_u32(skb
, NDTA_THRESH1
, tbl
->gc_thresh1
) ||
1805 nla_put_u32(skb
, NDTA_THRESH2
, tbl
->gc_thresh2
) ||
1806 nla_put_u32(skb
, NDTA_THRESH3
, tbl
->gc_thresh3
))
1807 goto nla_put_failure
;
1809 unsigned long now
= jiffies
;
1810 unsigned int flush_delta
= now
- tbl
->last_flush
;
1811 unsigned int rand_delta
= now
- tbl
->last_rand
;
1812 struct neigh_hash_table
*nht
;
1813 struct ndt_config ndc
= {
1814 .ndtc_key_len
= tbl
->key_len
,
1815 .ndtc_entry_size
= tbl
->entry_size
,
1816 .ndtc_entries
= atomic_read(&tbl
->entries
),
1817 .ndtc_last_flush
= jiffies_to_msecs(flush_delta
),
1818 .ndtc_last_rand
= jiffies_to_msecs(rand_delta
),
1819 .ndtc_proxy_qlen
= tbl
->proxy_queue
.qlen
,
1823 nht
= rcu_dereference_bh(tbl
->nht
);
1824 ndc
.ndtc_hash_rnd
= nht
->hash_rnd
[0];
1825 ndc
.ndtc_hash_mask
= ((1 << nht
->hash_shift
) - 1);
1826 rcu_read_unlock_bh();
1828 if (nla_put(skb
, NDTA_CONFIG
, sizeof(ndc
), &ndc
))
1829 goto nla_put_failure
;
1834 struct ndt_stats ndst
;
1836 memset(&ndst
, 0, sizeof(ndst
));
1838 for_each_possible_cpu(cpu
) {
1839 struct neigh_statistics
*st
;
1841 st
= per_cpu_ptr(tbl
->stats
, cpu
);
1842 ndst
.ndts_allocs
+= st
->allocs
;
1843 ndst
.ndts_destroys
+= st
->destroys
;
1844 ndst
.ndts_hash_grows
+= st
->hash_grows
;
1845 ndst
.ndts_res_failed
+= st
->res_failed
;
1846 ndst
.ndts_lookups
+= st
->lookups
;
1847 ndst
.ndts_hits
+= st
->hits
;
1848 ndst
.ndts_rcv_probes_mcast
+= st
->rcv_probes_mcast
;
1849 ndst
.ndts_rcv_probes_ucast
+= st
->rcv_probes_ucast
;
1850 ndst
.ndts_periodic_gc_runs
+= st
->periodic_gc_runs
;
1851 ndst
.ndts_forced_gc_runs
+= st
->forced_gc_runs
;
1854 if (nla_put(skb
, NDTA_STATS
, sizeof(ndst
), &ndst
))
1855 goto nla_put_failure
;
1858 BUG_ON(tbl
->parms
.dev
);
1859 if (neightbl_fill_parms(skb
, &tbl
->parms
) < 0)
1860 goto nla_put_failure
;
1862 read_unlock_bh(&tbl
->lock
);
1863 nlmsg_end(skb
, nlh
);
1867 read_unlock_bh(&tbl
->lock
);
1868 nlmsg_cancel(skb
, nlh
);
1872 static int neightbl_fill_param_info(struct sk_buff
*skb
,
1873 struct neigh_table
*tbl
,
1874 struct neigh_parms
*parms
,
1875 u32 pid
, u32 seq
, int type
,
1878 struct ndtmsg
*ndtmsg
;
1879 struct nlmsghdr
*nlh
;
1881 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1885 ndtmsg
= nlmsg_data(nlh
);
1887 read_lock_bh(&tbl
->lock
);
1888 ndtmsg
->ndtm_family
= tbl
->family
;
1889 ndtmsg
->ndtm_pad1
= 0;
1890 ndtmsg
->ndtm_pad2
= 0;
1892 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) < 0 ||
1893 neightbl_fill_parms(skb
, parms
) < 0)
1896 read_unlock_bh(&tbl
->lock
);
1897 nlmsg_end(skb
, nlh
);
1900 read_unlock_bh(&tbl
->lock
);
1901 nlmsg_cancel(skb
, nlh
);
1905 static const struct nla_policy nl_neightbl_policy
[NDTA_MAX
+1] = {
1906 [NDTA_NAME
] = { .type
= NLA_STRING
},
1907 [NDTA_THRESH1
] = { .type
= NLA_U32
},
1908 [NDTA_THRESH2
] = { .type
= NLA_U32
},
1909 [NDTA_THRESH3
] = { .type
= NLA_U32
},
1910 [NDTA_GC_INTERVAL
] = { .type
= NLA_U64
},
1911 [NDTA_PARMS
] = { .type
= NLA_NESTED
},
1914 static const struct nla_policy nl_ntbl_parm_policy
[NDTPA_MAX
+1] = {
1915 [NDTPA_IFINDEX
] = { .type
= NLA_U32
},
1916 [NDTPA_QUEUE_LEN
] = { .type
= NLA_U32
},
1917 [NDTPA_PROXY_QLEN
] = { .type
= NLA_U32
},
1918 [NDTPA_APP_PROBES
] = { .type
= NLA_U32
},
1919 [NDTPA_UCAST_PROBES
] = { .type
= NLA_U32
},
1920 [NDTPA_MCAST_PROBES
] = { .type
= NLA_U32
},
1921 [NDTPA_MCAST_REPROBES
] = { .type
= NLA_U32
},
1922 [NDTPA_BASE_REACHABLE_TIME
] = { .type
= NLA_U64
},
1923 [NDTPA_GC_STALETIME
] = { .type
= NLA_U64
},
1924 [NDTPA_DELAY_PROBE_TIME
] = { .type
= NLA_U64
},
1925 [NDTPA_RETRANS_TIME
] = { .type
= NLA_U64
},
1926 [NDTPA_ANYCAST_DELAY
] = { .type
= NLA_U64
},
1927 [NDTPA_PROXY_DELAY
] = { .type
= NLA_U64
},
1928 [NDTPA_LOCKTIME
] = { .type
= NLA_U64
},
1931 static int neightbl_set(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
1933 struct net
*net
= sock_net(skb
->sk
);
1934 struct neigh_table
*tbl
;
1935 struct ndtmsg
*ndtmsg
;
1936 struct nlattr
*tb
[NDTA_MAX
+1];
1940 err
= nlmsg_parse(nlh
, sizeof(*ndtmsg
), tb
, NDTA_MAX
,
1941 nl_neightbl_policy
);
1945 if (tb
[NDTA_NAME
] == NULL
) {
1950 ndtmsg
= nlmsg_data(nlh
);
1952 for (tidx
= 0; tidx
< NEIGH_NR_TABLES
; tidx
++) {
1953 tbl
= neigh_tables
[tidx
];
1956 if (ndtmsg
->ndtm_family
&& tbl
->family
!= ndtmsg
->ndtm_family
)
1958 if (nla_strcmp(tb
[NDTA_NAME
], tbl
->id
) == 0) {
1968 * We acquire tbl->lock to be nice to the periodic timers and
1969 * make sure they always see a consistent set of values.
1971 write_lock_bh(&tbl
->lock
);
1973 if (tb
[NDTA_PARMS
]) {
1974 struct nlattr
*tbp
[NDTPA_MAX
+1];
1975 struct neigh_parms
*p
;
1978 err
= nla_parse_nested(tbp
, NDTPA_MAX
, tb
[NDTA_PARMS
],
1979 nl_ntbl_parm_policy
);
1981 goto errout_tbl_lock
;
1983 if (tbp
[NDTPA_IFINDEX
])
1984 ifindex
= nla_get_u32(tbp
[NDTPA_IFINDEX
]);
1986 p
= lookup_neigh_parms(tbl
, net
, ifindex
);
1989 goto errout_tbl_lock
;
1992 for (i
= 1; i
<= NDTPA_MAX
; i
++) {
1997 case NDTPA_QUEUE_LEN
:
1998 NEIGH_VAR_SET(p
, QUEUE_LEN_BYTES
,
1999 nla_get_u32(tbp
[i
]) *
2000 SKB_TRUESIZE(ETH_FRAME_LEN
));
2002 case NDTPA_QUEUE_LENBYTES
:
2003 NEIGH_VAR_SET(p
, QUEUE_LEN_BYTES
,
2004 nla_get_u32(tbp
[i
]));
2006 case NDTPA_PROXY_QLEN
:
2007 NEIGH_VAR_SET(p
, PROXY_QLEN
,
2008 nla_get_u32(tbp
[i
]));
2010 case NDTPA_APP_PROBES
:
2011 NEIGH_VAR_SET(p
, APP_PROBES
,
2012 nla_get_u32(tbp
[i
]));
2014 case NDTPA_UCAST_PROBES
:
2015 NEIGH_VAR_SET(p
, UCAST_PROBES
,
2016 nla_get_u32(tbp
[i
]));
2018 case NDTPA_MCAST_PROBES
:
2019 NEIGH_VAR_SET(p
, MCAST_PROBES
,
2020 nla_get_u32(tbp
[i
]));
2022 case NDTPA_MCAST_REPROBES
:
2023 NEIGH_VAR_SET(p
, MCAST_REPROBES
,
2024 nla_get_u32(tbp
[i
]));
2026 case NDTPA_BASE_REACHABLE_TIME
:
2027 NEIGH_VAR_SET(p
, BASE_REACHABLE_TIME
,
2028 nla_get_msecs(tbp
[i
]));
2029 /* update reachable_time as well, otherwise, the change will
2030 * only be effective after the next time neigh_periodic_work
2031 * decides to recompute it (can be multiple minutes)
2034 neigh_rand_reach_time(NEIGH_VAR(p
, BASE_REACHABLE_TIME
));
2036 case NDTPA_GC_STALETIME
:
2037 NEIGH_VAR_SET(p
, GC_STALETIME
,
2038 nla_get_msecs(tbp
[i
]));
2040 case NDTPA_DELAY_PROBE_TIME
:
2041 NEIGH_VAR_SET(p
, DELAY_PROBE_TIME
,
2042 nla_get_msecs(tbp
[i
]));
2044 case NDTPA_RETRANS_TIME
:
2045 NEIGH_VAR_SET(p
, RETRANS_TIME
,
2046 nla_get_msecs(tbp
[i
]));
2048 case NDTPA_ANYCAST_DELAY
:
2049 NEIGH_VAR_SET(p
, ANYCAST_DELAY
,
2050 nla_get_msecs(tbp
[i
]));
2052 case NDTPA_PROXY_DELAY
:
2053 NEIGH_VAR_SET(p
, PROXY_DELAY
,
2054 nla_get_msecs(tbp
[i
]));
2056 case NDTPA_LOCKTIME
:
2057 NEIGH_VAR_SET(p
, LOCKTIME
,
2058 nla_get_msecs(tbp
[i
]));
2065 if ((tb
[NDTA_THRESH1
] || tb
[NDTA_THRESH2
] ||
2066 tb
[NDTA_THRESH3
] || tb
[NDTA_GC_INTERVAL
]) &&
2067 !net_eq(net
, &init_net
))
2068 goto errout_tbl_lock
;
2070 if (tb
[NDTA_THRESH1
])
2071 tbl
->gc_thresh1
= nla_get_u32(tb
[NDTA_THRESH1
]);
2073 if (tb
[NDTA_THRESH2
])
2074 tbl
->gc_thresh2
= nla_get_u32(tb
[NDTA_THRESH2
]);
2076 if (tb
[NDTA_THRESH3
])
2077 tbl
->gc_thresh3
= nla_get_u32(tb
[NDTA_THRESH3
]);
2079 if (tb
[NDTA_GC_INTERVAL
])
2080 tbl
->gc_interval
= nla_get_msecs(tb
[NDTA_GC_INTERVAL
]);
2085 write_unlock_bh(&tbl
->lock
);
2090 static int neightbl_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2092 struct net
*net
= sock_net(skb
->sk
);
2093 int family
, tidx
, nidx
= 0;
2094 int tbl_skip
= cb
->args
[0];
2095 int neigh_skip
= cb
->args
[1];
2096 struct neigh_table
*tbl
;
2098 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
2100 for (tidx
= 0; tidx
< NEIGH_NR_TABLES
; tidx
++) {
2101 struct neigh_parms
*p
;
2103 tbl
= neigh_tables
[tidx
];
2107 if (tidx
< tbl_skip
|| (family
&& tbl
->family
!= family
))
2110 if (neightbl_fill_info(skb
, tbl
, NETLINK_CB(cb
->skb
).portid
,
2111 cb
->nlh
->nlmsg_seq
, RTM_NEWNEIGHTBL
,
2116 p
= list_next_entry(&tbl
->parms
, list
);
2117 list_for_each_entry_from(p
, &tbl
->parms_list
, list
) {
2118 if (!net_eq(neigh_parms_net(p
), net
))
2121 if (nidx
< neigh_skip
)
2124 if (neightbl_fill_param_info(skb
, tbl
, p
,
2125 NETLINK_CB(cb
->skb
).portid
,
2143 static int neigh_fill_info(struct sk_buff
*skb
, struct neighbour
*neigh
,
2144 u32 pid
, u32 seq
, int type
, unsigned int flags
)
2146 unsigned long now
= jiffies
;
2147 struct nda_cacheinfo ci
;
2148 struct nlmsghdr
*nlh
;
2151 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
2155 ndm
= nlmsg_data(nlh
);
2156 ndm
->ndm_family
= neigh
->ops
->family
;
2159 ndm
->ndm_flags
= neigh
->flags
;
2160 ndm
->ndm_type
= neigh
->type
;
2161 ndm
->ndm_ifindex
= neigh
->dev
->ifindex
;
2163 if (nla_put(skb
, NDA_DST
, neigh
->tbl
->key_len
, neigh
->primary_key
))
2164 goto nla_put_failure
;
2166 read_lock_bh(&neigh
->lock
);
2167 ndm
->ndm_state
= neigh
->nud_state
;
2168 if (neigh
->nud_state
& NUD_VALID
) {
2169 char haddr
[MAX_ADDR_LEN
];
2171 neigh_ha_snapshot(haddr
, neigh
, neigh
->dev
);
2172 if (nla_put(skb
, NDA_LLADDR
, neigh
->dev
->addr_len
, haddr
) < 0) {
2173 read_unlock_bh(&neigh
->lock
);
2174 goto nla_put_failure
;
2178 ci
.ndm_used
= jiffies_to_clock_t(now
- neigh
->used
);
2179 ci
.ndm_confirmed
= jiffies_to_clock_t(now
- neigh
->confirmed
);
2180 ci
.ndm_updated
= jiffies_to_clock_t(now
- neigh
->updated
);
2181 ci
.ndm_refcnt
= atomic_read(&neigh
->refcnt
) - 1;
2182 read_unlock_bh(&neigh
->lock
);
2184 if (nla_put_u32(skb
, NDA_PROBES
, atomic_read(&neigh
->probes
)) ||
2185 nla_put(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
))
2186 goto nla_put_failure
;
2188 nlmsg_end(skb
, nlh
);
2192 nlmsg_cancel(skb
, nlh
);
2196 static int pneigh_fill_info(struct sk_buff
*skb
, struct pneigh_entry
*pn
,
2197 u32 pid
, u32 seq
, int type
, unsigned int flags
,
2198 struct neigh_table
*tbl
)
2200 struct nlmsghdr
*nlh
;
2203 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
2207 ndm
= nlmsg_data(nlh
);
2208 ndm
->ndm_family
= tbl
->family
;
2211 ndm
->ndm_flags
= pn
->flags
| NTF_PROXY
;
2212 ndm
->ndm_type
= RTN_UNICAST
;
2213 ndm
->ndm_ifindex
= pn
->dev
->ifindex
;
2214 ndm
->ndm_state
= NUD_NONE
;
2216 if (nla_put(skb
, NDA_DST
, tbl
->key_len
, pn
->key
))
2217 goto nla_put_failure
;
2219 nlmsg_end(skb
, nlh
);
2223 nlmsg_cancel(skb
, nlh
);
2227 static void neigh_update_notify(struct neighbour
*neigh
)
2229 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, neigh
);
2230 __neigh_notify(neigh
, RTM_NEWNEIGH
, 0);
2233 static int neigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
,
2234 struct netlink_callback
*cb
)
2236 struct net
*net
= sock_net(skb
->sk
);
2237 struct neighbour
*n
;
2238 int rc
, h
, s_h
= cb
->args
[1];
2239 int idx
, s_idx
= idx
= cb
->args
[2];
2240 struct neigh_hash_table
*nht
;
2243 nht
= rcu_dereference_bh(tbl
->nht
);
2245 for (h
= s_h
; h
< (1 << nht
->hash_shift
); h
++) {
2248 for (n
= rcu_dereference_bh(nht
->hash_buckets
[h
]), idx
= 0;
2250 n
= rcu_dereference_bh(n
->next
)) {
2251 if (!net_eq(dev_net(n
->dev
), net
))
2255 if (neigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).portid
,
2268 rcu_read_unlock_bh();
2274 static int pneigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
,
2275 struct netlink_callback
*cb
)
2277 struct pneigh_entry
*n
;
2278 struct net
*net
= sock_net(skb
->sk
);
2279 int rc
, h
, s_h
= cb
->args
[3];
2280 int idx
, s_idx
= idx
= cb
->args
[4];
2282 read_lock_bh(&tbl
->lock
);
2284 for (h
= s_h
; h
<= PNEIGH_HASHMASK
; h
++) {
2287 for (n
= tbl
->phash_buckets
[h
], idx
= 0; n
; n
= n
->next
) {
2288 if (dev_net(n
->dev
) != net
)
2292 if (pneigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).portid
,
2295 NLM_F_MULTI
, tbl
) < 0) {
2296 read_unlock_bh(&tbl
->lock
);
2305 read_unlock_bh(&tbl
->lock
);
2314 static int neigh_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2316 struct neigh_table
*tbl
;
2321 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
2323 /* check for full ndmsg structure presence, family member is
2324 * the same for both structures
2326 if (nlmsg_len(cb
->nlh
) >= sizeof(struct ndmsg
) &&
2327 ((struct ndmsg
*) nlmsg_data(cb
->nlh
))->ndm_flags
== NTF_PROXY
)
2332 for (t
= 0; t
< NEIGH_NR_TABLES
; t
++) {
2333 tbl
= neigh_tables
[t
];
2337 if (t
< s_t
|| (family
&& tbl
->family
!= family
))
2340 memset(&cb
->args
[1], 0, sizeof(cb
->args
) -
2341 sizeof(cb
->args
[0]));
2343 err
= pneigh_dump_table(tbl
, skb
, cb
);
2345 err
= neigh_dump_table(tbl
, skb
, cb
);
2354 void neigh_for_each(struct neigh_table
*tbl
, void (*cb
)(struct neighbour
*, void *), void *cookie
)
2357 struct neigh_hash_table
*nht
;
2360 nht
= rcu_dereference_bh(tbl
->nht
);
2362 read_lock(&tbl
->lock
); /* avoid resizes */
2363 for (chain
= 0; chain
< (1 << nht
->hash_shift
); chain
++) {
2364 struct neighbour
*n
;
2366 for (n
= rcu_dereference_bh(nht
->hash_buckets
[chain
]);
2368 n
= rcu_dereference_bh(n
->next
))
2371 read_unlock(&tbl
->lock
);
2372 rcu_read_unlock_bh();
2374 EXPORT_SYMBOL(neigh_for_each
);
2376 /* The tbl->lock must be held as a writer and BH disabled. */
2377 void __neigh_for_each_release(struct neigh_table
*tbl
,
2378 int (*cb
)(struct neighbour
*))
2381 struct neigh_hash_table
*nht
;
2383 nht
= rcu_dereference_protected(tbl
->nht
,
2384 lockdep_is_held(&tbl
->lock
));
2385 for (chain
= 0; chain
< (1 << nht
->hash_shift
); chain
++) {
2386 struct neighbour
*n
;
2387 struct neighbour __rcu
**np
;
2389 np
= &nht
->hash_buckets
[chain
];
2390 while ((n
= rcu_dereference_protected(*np
,
2391 lockdep_is_held(&tbl
->lock
))) != NULL
) {
2394 write_lock(&n
->lock
);
2397 rcu_assign_pointer(*np
,
2398 rcu_dereference_protected(n
->next
,
2399 lockdep_is_held(&tbl
->lock
)));
2403 write_unlock(&n
->lock
);
2405 neigh_cleanup_and_release(n
);
2409 EXPORT_SYMBOL(__neigh_for_each_release
);
2411 int neigh_xmit(int index
, struct net_device
*dev
,
2412 const void *addr
, struct sk_buff
*skb
)
2414 int err
= -EAFNOSUPPORT
;
2415 if (likely(index
< NEIGH_NR_TABLES
)) {
2416 struct neigh_table
*tbl
;
2417 struct neighbour
*neigh
;
2419 tbl
= neigh_tables
[index
];
2422 neigh
= __neigh_lookup_noref(tbl
, addr
, dev
);
2424 neigh
= __neigh_create(tbl
, addr
, dev
, false);
2425 err
= PTR_ERR(neigh
);
2428 err
= neigh
->output(neigh
, skb
);
2430 else if (index
== NEIGH_LINK_TABLE
) {
2431 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
2432 addr
, NULL
, skb
->len
);
2435 err
= dev_queue_xmit(skb
);
2443 EXPORT_SYMBOL(neigh_xmit
);
2445 #ifdef CONFIG_PROC_FS
2447 static struct neighbour
*neigh_get_first(struct seq_file
*seq
)
2449 struct neigh_seq_state
*state
= seq
->private;
2450 struct net
*net
= seq_file_net(seq
);
2451 struct neigh_hash_table
*nht
= state
->nht
;
2452 struct neighbour
*n
= NULL
;
2453 int bucket
= state
->bucket
;
2455 state
->flags
&= ~NEIGH_SEQ_IS_PNEIGH
;
2456 for (bucket
= 0; bucket
< (1 << nht
->hash_shift
); bucket
++) {
2457 n
= rcu_dereference_bh(nht
->hash_buckets
[bucket
]);
2460 if (!net_eq(dev_net(n
->dev
), net
))
2462 if (state
->neigh_sub_iter
) {
2466 v
= state
->neigh_sub_iter(state
, n
, &fakep
);
2470 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2472 if (n
->nud_state
& ~NUD_NOARP
)
2475 n
= rcu_dereference_bh(n
->next
);
2481 state
->bucket
= bucket
;
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
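/* Example (illustrative sketch): how a protocol wires these helpers
 * into its own iterator, in the style of /proc/net/arp.  The "foo_*"
 * names are hypothetical:
 *
 *	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations foo_seq_ops = {
 *		.start	= foo_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= foo_seq_show,
 *	};
 */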
/* statistics via seq_file */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards
		   );

	return 0;
}
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE_DATA(inode);
	}
	return ret;
};
static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
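/* Example (illustrative sketch): the per-table stat file is created
 * from neigh_table_init() elsewhere in this file, roughly:
 *
 *	proc_create_data(tbl->id, 0444, init_net.proc_net_stat,
 *			 &neigh_stat_seq_fops, tbl);
 *
 * which exposes the counters above as /proc/net/stat/<table id>,
 * e.g. /proc/net/stat/arp_cache.
 */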
#endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
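/* Worked example (illustrative): with ETH_FRAME_LEN == 1514,
 * SKB_TRUESIZE(1514) comes to roughly 2 KB on a 64-bit build (frame
 * plus struct sk_buff and skb_shared_info overhead), so a write of
 * unres_qlen = 100 stores about 100 * SKB_TRUESIZE(1514) into the
 * underlying unres_qlen_bytes, and reads convert back with the same
 * divisor.
 */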
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
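/* Example (illustrative): because set_bit() marks per-device values as
 * explicitly configured, a later write to the table default only
 * reaches devices still on the default, e.g.
 *
 *	echo 2 > /proc/sys/net/ipv4/neigh/eth0/mcast_solicit
 *	echo 5 > /proc/sys/net/ipv4/neigh/default/mcast_solicit
 *
 * leaves eth0 at 2 while devices that were never overridden move to 5.
 */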
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
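/* Example (illustrative): a write through either handler takes effect
 * immediately, e.g.
 *
 *	echo 30000 > /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * re-randomizes reachable_time right away to a value in
 * [base/2, 3*base/2) instead of waiting for neigh_periodic_work.
 */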
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
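/* Example (illustrative): NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES,
 * "app_solicit") expands to an entry equivalent to:
 *
 *	[NEIGH_VAR_APP_PROBES] = {
 *		.procname	= "app_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_APP_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	},
 *
 * The .data offset is rebased onto a real neigh_parms instance in
 * neigh_sysctl_register() by adding (long) p.
 */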
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
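/* Example (illustrative sketch): per-device registration as done by
 * the protocols, in the style of IPv4's devinet code:
 *
 *	neigh_sysctl_register(dev, idev->arp_parms, NULL);
 *
 * Passing a NULL handler installs neigh_proc_base_reachable_time for
 * the two base_reachable_time knobs, as set up above.
 */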
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);