/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simple and does
   not make callbacks to neighbour tables.
 */
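
/* Editor's sketch (not part of the original file): the canonical way to
 * honour the rules above when a scan finds an entry needing real work --
 * pin the entry with a reference under tbl->lock, drop the lock, and
 * only then call out to drivers or protocols.
 */
static inline void neigh_hold_and_work_example(struct neigh_table *tbl,
					       struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* refcount prevents destruction */
	write_unlock_bh(&tbl->lock);	/* never call out under tbl->lock */
	/* ... non-trivial actions on n go here ... */
	neigh_release(n);
}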
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
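
/* Editor's example (not in the original file): with the common 30 second
 * base reachable time, the helper above yields a delay uniform over
 * [15 * HZ, 45 * HZ), so hosts on a LAN do not revalidate their
 * neighbours in lock-step.
 */
static inline unsigned long example_ipv6_reach_time(void)
{
	return neigh_rand_reach_time(30 * HZ);	/* in [15 * HZ, 45 * HZ) */
}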
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
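
/* Editor's note (not in the original file): on a machine with 4 KiB pages
 * and 8-byte pointers, kzalloc() above serves tables of up to 512 buckets
 * (shift <= 9 gives size <= PAGE_SIZE); bigger tables fall back to whole
 * pages, e.g. shift == 10 needs 8 KiB, i.e. an order-1 page allocation.
 */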
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
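
/* Editor's sketch (not in the original file): bucket selection above keeps
 * only the top hash_shift bits of the 32-bit hash, so growing the table by
 * one bit deterministically splits every old bucket in two.
 */
static inline u32 example_bucket_index(u32 hash32, unsigned int hash_shift)
{
	return hash32 >> (32 - hash_shift);	/* top hash_shift bits */
}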
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!atomic_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
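
/* Editor's sketch (not in the original file): a typical caller pattern.
 * On a hit, neigh_lookup() returns the entry with its refcount raised,
 * so the caller owns a reference and must drop it when done.
 */
static inline void example_neigh_lookup(struct neigh_table *tbl,
					const void *pkey,
					struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n) {
		/* ... read n->ha, n->nud_state, etc. ... */
		neigh_release(n);	/* drop the reference from lookup */
	}
}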
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
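
/* Editor's note (not in the original file): the proxy hash reads the last
 * four bytes of the key (the low bytes of an address, where addresses
 * differ most) and XOR-folds them down to the 4 bits selecting one of
 * the PNEIGH_HASHMASK + 1 == 16 buckets.
 */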
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}
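
/* Editor's example (not in the original file): with the usual IPv4
 * defaults (ucast_solicit == 3, app_solicit == 0, mcast_solicit == 3,
 * mcast_resolicit == 0), an entry in NUD_INCOMPLETE gives up after
 * 3 + 0 + 3 == 6 probes, while one re-probing from NUD_PROBE gives up
 * after 3 + 0 + 0 == 3 probes.
 */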
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate place.  report_unreachable() is a very
	   complicated routine; in particular, it can hit the same
	   neighbour entry!

	   So we try to be careful and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_copy(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
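
/* Editor's summary (not in the original file) of the timer-driven NUD
 * transitions implemented above:
 *
 *	REACHABLE --(reachable_time expires)--> STALE
 *	STALE     --(output traffic)----------> DELAY   (see __neigh_event_send)
 *	DELAY     --(confirmed in time)-------> REACHABLE
 *	DELAY     --(no confirmation)---------> PROBE
 *	PROBE/INCOMPLETE --(too many probes)--> FAILED
 */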
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
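
/* Editor's sketch (not in the original file): how output paths consume
 * the event-send return value -- 0 means the entry is usable now, non-zero
 * means the skb was queued (or dropped) pending resolution, mirroring the
 * pattern in neigh_resolve_output() below.
 */
static inline int example_event_send(struct neighbour *n, struct sk_buff *skb)
{
	if (!neigh_event_send(n, skb)) {
		/* resolved: fill in the link-layer header and transmit */
		return 0;
	}
	return -EAGAIN;	/* skb now owned by the resolution queue */
}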
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   Caller MUST hold a reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead)
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
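
/* Editor's sketch (not in the original file): an administrative update,
 * as the netlink handlers below issue it -- force the new link-layer
 * address in and mark the entry permanent.
 */
static inline int example_admin_update(struct neighbour *n, const u8 *lladdr)
{
	return neigh_update(n, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}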
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
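
/* Editor's note (not in the original file): protocols register their table
 * at init time with a fixed slot, e.g. IPv4 ARP does, in effect,
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and the initial hash has 1 << 3 == 8 buckets, grown on demand by
 * __neigh_create() once entries outnumber buckets.
 */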
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < tbl->key_len)
		goto out;

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE |
			   NEIGH_UPDATE_F_ADMIN);
	neigh_release(neigh);

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < tbl->key_len)
		goto out;
	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			err = 0;
		}
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		neigh = __neigh_lookup_errno(tbl, dst, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
	}

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
	neigh_release(neigh);

out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME)) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME)) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY)) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME)))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2	 = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

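/*
 * The resulting RTM_NEWNEIGH message is a struct ndmsg followed by
 * NDA_DST (the protocol address), NDA_LLADDR (only while the entry is
 * NUD_VALID), NDA_PROBES and NDA_CACHEINFO -- exactly the attributes that
 * neigh_nlmsg_size() below budgets for.
 */
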
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2	 = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev->ifindex;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (!net_eq(dev_net(n->dev), net))
				continue;
			if (idx < s_idx)
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (dev_net(n->dev) != net)
				continue;
			if (idx < s_idx)
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	/* check for full ndmsg structure presence; the family member is
	 * the same for both structures
	 */
	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb);
		else
			err = neigh_dump_table(tbl, skb, cb);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}

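/*
 * A GETNEIGH dump whose ndmsg has NTF_PROXY set thus walks the proxy
 * (pneigh) tables instead of the ordinary neighbour cache; this is what
 * "ip neigh show proxy" relies on.
 */
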
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);

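/*
 * Illustrative use only (the callback below is hypothetical, not part of
 * this file): walking a table to count its entries.
 *
 *	static void count_neigh(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_neigh, &count);
 *
 * The callback runs under tbl->lock and the RCU-BH read side, so it must
 * not sleep.
 */
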
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);

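/*
 * The callback decides, under the entry's write lock, whether an entry is
 * unlinked: a nonzero return removes it from its chain, marks it dead and
 * drops it via neigh_cleanup_and_release().  A sketch of a caller (the
 * predicate here is made up for illustration):
 *
 *	static int release_failed(struct neighbour *n)
 *	{
 *		return n->nud_state == NUD_FAILED;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_failed);
 *	write_unlock_bh(&tbl->lock);
 */
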
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		neigh = __neigh_lookup_noref(tbl, addr, dev);
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh))
			goto out_kfree_skb;
		err = neigh->output(neigh, skb);
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);

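/*
 * neigh_xmit() lets a caller that carries only a table index (for
 * example MPLS, which stores NEIGH_ARP_TABLE/NEIGH_ND_TABLE/
 * NEIGH_LINK_TABLE next to its via address) resolve and transmit in one
 * step.  With NEIGH_LINK_TABLE the address already is a link-layer
 * address, so the frame is headered and queued directly, bypassing the
 * neighbour cache.
 */
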
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

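/*
 * Protocol code wires these three helpers into its own seq_operations.
 * A sketch modelled on ARP's /proc glue (abbreviated, not verbatim):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * together with neigh_seq_next and neigh_seq_stop for .next/.stop, which
 * keeps the RCU-BH read side held across the whole iteration.
 */
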
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards
		   );

	return 0;
}

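/*
 * Each row is one possible CPU's counters in hex; the "entries" column
 * repeats the table-wide entry count.  These tables surface as files such
 * as /proc/net/stat/arp_cache and /proc/net/stat/ndisc_cache.
 */
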
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}

static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

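/*
 * The legacy "unres_qlen" knob is presented in packets but stored in the
 * byte-based QUEUE_LEN_BYTES variable: a read divides the byte value by
 * SKB_TRUESIZE(ETH_FRAME_LEN), and writing N stores
 * N * SKB_TRUESIZE(ETH_FRAME_LEN) bytes back, so the two sysctls stay in
 * sync (modulo rounding from the integer division).
 */
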
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

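/*
 * data_state keeps one "explicitly written" bit per NEIGH_VAR field.
 * Writing a per-device sysctl sets the bit, and neigh_copy_dflt_parms()
 * above skips any device whose bit is set -- so changing a "default"
 * value propagates only to devices that were never configured directly.
 */
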
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

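/*
 * So, for example, writing 30000 to a base_reachable_time_ms sysctl (the
 * exact path depends on the family and device) both stores the new base
 * value and immediately re-randomizes p->reachable_time from it, instead
 * of leaving the stale randomized value in place until the next periodic
 * recomputation.
 */
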
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

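/*
 * This is the classic offsetof trick: "dereferencing" a NULL struct
 * neigh_parms pointer only to take an address yields the offset of
 * data[index] within the structure.  neigh_sysctl_register() later turns
 * the offset into a real pointer by adding the actual parms address
 * ("t->neigh_vars[i].data += (long) p").
 */
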
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc)	\
	[NEIGH_VAR_ ## attr] = {				\
		.procname	= name,				\
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int),			\
		.mode		= mval,				\
		.proc_handler	= proc,				\
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name)	\
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set, to ensure the new timer
		 * starts being applied after the next neighbour update
		 * instead of waiting for neigh_periodic_work to update its
		 * value (which can take multiple minutes).  So any handler
		 * that replaces them should do this as well.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

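/*
 * The resulting paths look like net/ipv4/neigh/eth0/... for a per-device
 * parms set and net/ipv6/neigh/default/... for the defaults.  In the
 * per-device case the table is cut off at NEIGH_VAR_GC_INTERVAL, since
 * gc_interval and the gc_thresh* knobs only make sense table-wide.
 */
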
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);