/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans of and updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock also protects other entry fields:
    - the timer
    - the resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow there is dev->hard_header.
   dev->hard_header is assumed to be simple and to make no callbacks
   into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
   list of neighbour tables; this list is used only in process context.
 */

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
        if (neigh->parms->neigh_cleanup)
                neigh->parms->neigh_cleanup(neigh);

        __neigh_notify(neigh, RTM_DELNEIGH, 0);
        neigh_release(neigh);
}

/*
 * The result is a random value distributed over the interval
 * (1/2)*base ... (3/2)*base. This corresponds to the default IPv6
 * settings and is not overridable, because it is a really
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
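
/* Worked example (illustrative): with base == 30 * HZ, the returned
 * value is uniform over [15 * HZ, 45 * HZ), i.e. a reachable time of
 * 15-45 seconds; a base of 0 disables randomization and yields 0.
 */
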
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
        for (i = 0; i < (1 << nht->hash_shift); i++) {
                struct neighbour *n;
                struct neighbour __rcu **np;

                np = &nht->hash_buckets[i];
                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                rcu_assign_pointer(*np,
                                        rcu_dereference_protected(n->next,
                                                lockdep_is_held(&tbl->lock)));
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}
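
/* Note: forced GC runs synchronously from neigh_alloc() when the table
 * exceeds its gc_thresh2/gc_thresh3 watermarks (see below); it only
 * reclaims entries that are both unreferenced (refcnt == 1) and not
 * NUD_PERMANENT, so a table full of in-use entries cannot be shrunk.
 */
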
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        neigh_hold(n);
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}

static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
        int i;
        struct neigh_hash_table *nht;

        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        for (i = 0; i < (1 << nht->hash_shift); i++) {
                struct neighbour *n;
                struct neighbour __rcu **np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        rcu_assign_pointer(*np,
                                rcu_dereference_protected(n->next,
                                        lockdep_is_held(&tbl->lock)));
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->arp_queue_len_bytes = 0;
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        if (tbl->entry_size)
                n = kzalloc(tbl->entry_size, GFP_ATOMIC);
        else {
                int sz = sizeof(*n) + tbl->key_len;

                sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
                sz += dev->neigh_priv_len;
                n = kzalloc(sz, GFP_ATOMIC);
        }
        if (!n)
                goto out_entries;

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        seqlock_init(&n->ha_lock);
        n->updated   = n->used = now;
        n->nud_state = NUD_NONE;
        n->output    = neigh_blackhole;
        seqlock_init(&n->hh.hh_lock);
        n->parms     = neigh_parms_clone(&tbl->parms);
        setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead = 1;
out:
        return n;

out_entries:
        atomic_dec(&tbl->entries);
        goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
        get_random_bytes(x, sizeof(*x));
        *x |= 1;
}
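
/* The hash seed is made odd on purpose: OR-ing in bit 0 guarantees a
 * non-zero value, and (an assumption about the callers, not something this
 * file enforces) an odd constant is also a safe multiplier for tables whose
 * tbl->hash mixes the seed in multiplicatively.
 */
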
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
        size_t size = (1 << shift) * sizeof(struct neighbour *);
        struct neigh_hash_table *ret;
        struct neighbour __rcu **buckets;
        int i;

        ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
        if (!ret)
                return NULL;
        if (size <= PAGE_SIZE)
                buckets = kzalloc(size, GFP_ATOMIC);
        else
                buckets = (struct neighbour __rcu **)
                          __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
                                           get_order(size));
        if (!buckets) {
                kfree(ret);
                return NULL;
        }
        ret->hash_buckets = buckets;
        ret->hash_shift = shift;
        for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
                neigh_get_hash_rnd(&ret->hash_rnd[i]);
        return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
        struct neigh_hash_table *nht = container_of(head,
                                                    struct neigh_hash_table,
                                                    rcu);
        size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
        struct neighbour __rcu **buckets = nht->hash_buckets;

        if (size <= PAGE_SIZE)
                kfree(buckets);
        else
                free_pages((unsigned long)buckets, get_order(size));
        kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
                                                unsigned long new_shift)
{
        unsigned int i, hash;
        struct neigh_hash_table *new_nht, *old_nht;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        old_nht = rcu_dereference_protected(tbl->nht,
                                            lockdep_is_held(&tbl->lock));
        new_nht = neigh_hash_alloc(new_shift);
        if (!new_nht)
                return old_nht;

        for (i = 0; i < (1 << old_nht->hash_shift); i++) {
                struct neighbour *n, *next;

                for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
                                        lockdep_is_held(&tbl->lock));
                     n != NULL;
                     n = next) {
                        hash = tbl->hash(n->primary_key, n->dev,
                                         new_nht->hash_rnd);

                        hash >>= (32 - new_nht->hash_shift);
                        next = rcu_dereference_protected(n->next,
                                        lockdep_is_held(&tbl->lock));

                        rcu_assign_pointer(n->next,
                                rcu_dereference_protected(
                                        new_nht->hash_buckets[hash],
                                        lockdep_is_held(&tbl->lock)));
                        rcu_assign_pointer(new_nht->hash_buckets[hash], n);
                }
        }

        rcu_assign_pointer(tbl->nht, new_nht);
        call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
        return new_nht;
}
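
/* Resize under RCU, in outline: with tbl->lock write-held, every entry is
 * relinked into the new bucket array via rcu_assign_pointer(), the table
 * pointer is switched, and the old array is freed only after a grace period
 * (call_rcu). Lockless readers thus always observe one consistent table,
 * either the old one or the new one.
 */
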
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
        hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

        for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
             n != NULL;
             n = rcu_dereference_bh(n->next)) {
                if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
                        if (!atomic_inc_not_zero(&n->refcnt))
                                n = NULL;
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }

        rcu_read_unlock_bh();
        return n;
}
EXPORT_SYMBOL(neigh_lookup);
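
/* Bucket selection uses the top hash_shift bits of the 32-bit hash value
 * (hash >> (32 - hash_shift)); e.g. with hash_shift == 3 there are
 * 1 << 3 == 8 buckets, indexed by the three most significant bits.
 */
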
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
                                     const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        rcu_read_lock_bh();
        nht = rcu_dereference_bh(tbl->nht);
        hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

        for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
             n != NULL;
             n = rcu_dereference_bh(n->next)) {
                if (!memcmp(n->primary_key, pkey, key_len) &&
                    net_eq(dev_net(n->dev), net)) {
                        if (!atomic_inc_not_zero(&n->refcnt))
                                n = NULL;
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }

        rcu_read_unlock_bh();
        return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
        struct neigh_hash_table *nht;

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        if (dev->netdev_ops->ndo_neigh_construct) {
                error = dev->netdev_ops->ndo_neigh_construct(n);
                if (error < 0) {
                        rc = ERR_PTR(error);
                        goto out_neigh_release;
                }
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
                nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

        hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
                                        lockdep_is_held(&tbl->lock));
             n1 != NULL;
             n1 = rcu_dereference_protected(n1->next,
                                        lockdep_is_held(&tbl->lock))) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->dead = 0;
        neigh_hold(n);
        rcu_assign_pointer(n->next,
                           rcu_dereference_protected(nht->hash_buckets[hash_val],
                                                     lockdep_is_held(&tbl->lock)));
        rcu_assign_pointer(nht->hash_buckets[hash_val], n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
EXPORT_SYMBOL(neigh_create);

static u32 pneigh_hash(const void *pkey, int key_len)
{
        u32 hash_val = *(u32 *)(pkey + key_len - 4);
        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;
        return hash_val;
}
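
/* pneigh_hash() XOR-folds the last four bytes of the key (>> 16, >> 8,
 * >> 4) and masks with PNEIGH_HASHMASK, so proxy entries are spread over
 * PNEIGH_HASHMASK + 1 == 16 chains regardless of the key length.
 */
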
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
                                              struct net *net,
                                              const void *pkey,
                                              int key_len,
                                              struct net_device *dev)
{
        while (n) {
                if (!memcmp(n->key, pkey, key_len) &&
                    net_eq(pneigh_net(n), net) &&
                    (n->dev == dev || !n->dev))
                        return n;
                n = n->next;
        }
        return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
                struct net *net, const void *pkey, struct net_device *dev)
{
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                                 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
                                   struct net *net, const void *pkey,
                                   struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        read_lock_bh(&tbl->lock);
        n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
                              net, pkey, key_len, dev);
        read_unlock_bh(&tbl->lock);

        if (n || !creat)
                goto out;

        ASSERT_RTNL();

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        write_pnet(&n->net, hold_net(net));
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
                release_net(net);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        u32 hash_val = pneigh_hash(pkey, key_len);

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    net_eq(pneigh_net(n), net)) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        release_net(pneigh_net(n));
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
                                release_net(pneigh_net(n));
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
        if (atomic_dec_and_test(&parms->refcnt))
                neigh_parms_destroy(parms);
}

/*
 *      The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct net_device *dev = neigh->dev;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        skb_queue_purge(&neigh->arp_queue);
        neigh->arp_queue_len_bytes = 0;

        if (dev->netdev_ops->ndo_neigh_destroy)
                dev->netdev_ops->ndo_neigh_destroy(neigh);

        dev_put(dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neighbour write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

        neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with the neighbour write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

        neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
        struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
        struct neighbour *n;
        struct neighbour __rcu **np;
        unsigned int i;
        struct neigh_hash_table *nht;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));

        /*
         *      periodically recompute ReachableTime from random function
         */
        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = jiffies;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        for (i = 0 ; i < (1 << nht->hash_shift); i++) {
                np = &nht->hash_buckets[i];

                while ((n = rcu_dereference_protected(*np,
                                lockdep_is_held(&tbl->lock))) != NULL) {
                        unsigned int state;

                        write_lock(&n->lock);

                        state = n->nud_state;
                        if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                                write_unlock(&n->lock);
                                goto next_elt;
                        }

                        if (time_before(n->used, n->confirmed))
                                n->used = n->confirmed;

                        if (atomic_read(&n->refcnt) == 1 &&
                            (state == NUD_FAILED ||
                             time_after(jiffies, n->used + n->parms->gc_staletime))) {
                                *np = n->next;
                                n->dead = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);

next_elt:
                        np = &n->next;
                }
                /*
                 * It's fine to release the lock here, even if the hash table
                 * grows while we are preempted.
                 */
                write_unlock_bh(&tbl->lock);
                cond_resched();
                write_lock_bh(&tbl->lock);
        }
        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        schedule_delayed_work(&tbl->gc_work,
                              tbl->parms.base_reachable_time >> 1);
        write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return (n->nud_state & NUD_PROBE) ?
                p->ucast_probes :
                p->ucast_probes + p->app_probes + p->mcast_probes;
}
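
/* neigh_max_probes() is the total probe budget: ucast_probes alone while
 * re-validating an existing entry (NUD_PROBE), and the sum of the unicast,
 * application and multicast probe limits for an initial resolution.
 */
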
static void neigh_invalidate(struct neighbour *neigh)
        __releases(neigh->lock)
        __acquires(neigh->lock)
{
        struct sk_buff *skb;

        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
        neigh->updated = jiffies;

        /* This is a delicate spot. report_unreachable is a very
           complicated routine; in particular, it can hit this same
           neighbour entry!

           So we try to be careful and avoid a dead loop. --ANK
         */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                write_unlock(&neigh->lock);
                neigh->ops->error_report(neigh, skb);
                write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
        neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
{
        struct sk_buff *skb = skb_peek(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_copy(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
        neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        kfree_skb(skb);
}
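
/* NUD state machine driven by the timer below, in outline (a summary, not
 * normative): NONE -> INCOMPLETE on first use (probes are sent), then
 * REACHABLE on confirmation; REACHABLE decays after reachable_time to DELAY
 * (if recently used) or STALE; a DELAY entry that gets no confirmation
 * within delay_probe_time enters PROBE, where unicast probes either restore
 * REACHABLE or exhaust neigh_max_probes() and end in FAILED.
 */
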
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER))
                goto out;

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                neigh->nud_state = NUD_FAILED;
                notify = 1;
                neigh_invalidate(neigh);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                neigh_probe(neigh);
        } else {
out:
                write_unlock(&neigh->lock);
        }

        if (notify)
                neigh_update_notify(neigh);

        neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        bool immediate_probe = false;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        unsigned long next, now = jiffies;

                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state = NUD_INCOMPLETE;
                        neigh->updated = now;
                        next = now + max(neigh->parms->retrans_time, HZ/2);
                        neigh_add_timer(neigh, next);
                        immediate_probe = true;
                } else {
                        neigh->nud_state = NUD_FAILED;
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh,
                                jiffies + neigh->parms->delay_probe_time);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        while (neigh->arp_queue_len_bytes + skb->truesize >
                               neigh->parms->queue_len_bytes) {
                                struct sk_buff *buff;

                                buff = __skb_dequeue(&neigh->arp_queue);
                                if (!buff)
                                        break;
                                neigh->arp_queue_len_bytes -= buff->truesize;
                                kfree_skb(buff);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
                        neigh->arp_queue_len_bytes += skb->truesize;
                }
                rc = 1;
        }
out_unlock_bh:
        if (immediate_probe)
                neigh_probe(neigh);
        else
                write_unlock(&neigh->lock);
        local_bh_enable();
        return rc;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
                = NULL;

        if (neigh->dev->header_ops)
                update = neigh->dev->header_ops->cache_update;

        if (update) {
                hh = &neigh->hh;
                if (hh->hh_len) {
                        write_seqlock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_sequnlock_bh(&hh->hh_lock);
                }
        }
}



/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr
                                if it differs.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
                                lladdr instead of overriding it
                                if it differs; it also allows the current
                                state to be retained if lladdr is unchanged.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.

        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates that the neighbour is known to
                                be a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev;
        int update_isrouter = 0;

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
                if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
                    (new & NUD_FAILED)) {
                        neigh_invalidate(neigh);
                        notify = 1;
                }
                goto out;
        }

        /* Compare new lladdr with cached one */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If the entry was valid and the address is unchanged,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                             (old & NUD_CONNECTED))
                            )
                                new = old;
                }
        }

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                neigh->nud_state = new;
        }

        if (lladdr != neigh->ha) {
                write_seqlock(&neigh->ha_lock);
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                write_sequnlock(&neigh->ha_lock);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (neigh->parms->base_reachable_time << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid dead loop if something went wrong */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct dst_entry *dst = skb_dst(skb);
                        struct neighbour *n2, *n1 = neigh;
                        write_unlock_bh(&neigh->lock);

                        rcu_read_lock();
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
                                n1 = n2;
                        n1->output(n1, skb);
                        rcu_read_unlock();

                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
                neigh->arp_queue_len_bytes = 0;
        }
out:
        if (update_isrouter) {
                neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                        (neigh->flags | NTF_ROUTER) :
                        (neigh->flags & ~NTF_ROUTER);
        }
        write_unlock_bh(&neigh->lock);

        if (notify)
                neigh_update_notify(neigh);

        return err;
}
EXPORT_SYMBOL(neigh_update);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE);
        return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
{
        struct net_device *dev = dst->dev;
        __be16 prot = dst->ops->protocol;
        struct hh_cache *hh = &n->hh;

        write_lock_bh(&n->lock);

        /* Only one thread can come in here and initialize the
         * hh_cache entry.
         */
        if (!hh->hh_len)
                dev->header_ops->cache(n, hh, prot);

        write_unlock_bh(&n->lock);
}

/* This function can be used in contexts where only the old dev_queue_xmit
 * path worked, e.g. if you want to override the normal output path
 * (eql, shaper), but resolution has not been done yet.
 */

int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        __skb_pull(skb, skb_network_offset(skb));

        if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                            skb->len) < 0 &&
            dev->header_ops->rebuild(skb))
                return 0;

        return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        int rc = 0;

        if (!dst)
                goto discard;

        __skb_pull(skb, skb_network_offset(skb));

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                unsigned int seq;

                if (dev->header_ops->cache && !neigh->hh.hh_len)
                        neigh_hh_init(neigh, dst);

                do {
                        seq = read_seqbegin(&neigh->ha_lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                } while (read_seqretry(&neigh->ha_lock, seq));

                if (err >= 0)
                        rc = dev_queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                      dst, neigh);
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without the hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
        struct net_device *dev = neigh->dev;
        unsigned int seq;
        int err;

        __skb_pull(skb, skb_network_offset(skb));

        do {
                seq = read_seqbegin(&neigh->ha_lock);
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                      neigh->ha, NULL, skb->len);
        } while (read_seqretry(&neigh->ha_lock, seq));

        if (err >= 0)
                err = dev_queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
        return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
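
/* The three output methods above form a ladder: neigh_resolve_output() is
 * the slow path that may first trigger address resolution via
 * neigh_event_send(); neigh_connected_output() only snapshots the validated
 * link-layer address under the ha_lock seqlock before transmitting; and
 * neigh_direct_output() is for devices that need no destination address.
 */
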
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb, *n;

        spin_lock(&tbl->proxy_queue.lock);

        skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
                long tdif = NEIGH_CB(skb)->sched_next - now;

                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;

                        __skb_unlink(skb, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev)) {
                                rcu_read_lock();
                                tbl->proxy_redo(skb);
                                rcu_read_unlock();
                        } else {
                                kfree_skb(skb);
                        }

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        unsigned long sched_next = now + (net_random() % p->proxy_delay);

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        skb_dst_drop(skb);
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
                                                     struct net *net, int ifindex)
{
        struct neigh_parms *p;

        for (p = &tbl->parms; p; p = p->next) {
                if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
                    (!p->dev && !ifindex))
                        return p;
        }

        return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p, *ref;
        struct net *net = dev_net(dev);
        const struct net_device_ops *ops = dev->netdev_ops;

        ref = lookup_neigh_parms(tbl, net, 0);
        if (!ref)
                return NULL;

        p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl = tbl;
                atomic_set(&p->refcnt, 1);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);

                if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                        kfree(p);
                        return NULL;
                }

                dev_hold(dev);
                p->dev = dev;
                write_pnet(&p->net, hold_net(net));
                p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
        struct neigh_parms *parms =
                container_of(head, struct neigh_parms, rcu_head);

        neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        parms->dead = 1;
                        write_unlock_bh(&tbl->lock);
                        if (parms->dev)
                                dev_put(parms->dev);
                        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
        release_net(neigh_parms_net(parms));
        kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        write_pnet(&tbl->parms.net, &init_net);
        atomic_set(&tbl->parms.refcnt, 1);
        tbl->parms.reachable_time =
                        neigh_rand_reach_time(tbl->parms.base_reachable_time);

        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
                              &neigh_stat_seq_fops, tbl))
                panic("cannot create neighbour proc dir entry");
#endif

        RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->nht || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        rwlock_init(&tbl->lock);
        INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
        schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);
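
/* Sizing note: neigh_hash_alloc(3) above seeds each new table with
 * 1 << 3 == 8 buckets; neigh_create() later doubles the table (shift + 1)
 * whenever the entry count exceeds the current bucket count.
 */
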
1541 void neigh_table_init(struct neigh_table *tbl)
1543 struct neigh_table *tmp;
1545 neigh_table_init_no_netlink(tbl);
1546 write_lock(&neigh_tbl_lock);
1547 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1548 if (tmp->family == tbl->family)
1549 break;
1551 tbl->next = neigh_tables;
1552 neigh_tables = tbl;
1553 write_unlock(&neigh_tbl_lock);
1555 if (unlikely(tmp)) {
1556 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1557 "family %d\n", tbl->family);
1558 dump_stack();
1561 EXPORT_SYMBOL(neigh_table_init);
1563 int neigh_table_clear(struct neigh_table *tbl)
1565 struct neigh_table **tp;
1567 /* It is not clean... Fix it to unload IPv6 module safely */
1568 cancel_delayed_work_sync(&tbl->gc_work);
1569 del_timer_sync(&tbl->proxy_timer);
1570 pneigh_queue_purge(&tbl->proxy_queue);
1571 neigh_ifdown(tbl, NULL);
1572 if (atomic_read(&tbl->entries))
1573 printk(KERN_CRIT "neighbour leakage\n");
1574 write_lock(&neigh_tbl_lock);
1575 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1576 if (*tp == tbl) {
1577 *tp = tbl->next;
1578 break;
1581 write_unlock(&neigh_tbl_lock);
1583 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1584 neigh_hash_free_rcu);
1585 tbl->nht = NULL;
1587 kfree(tbl->phash_buckets);
1588 tbl->phash_buckets = NULL;
1590 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1592 free_percpu(tbl->stats);
1593 tbl->stats = NULL;
1595 return 0;
1597 EXPORT_SYMBOL(neigh_table_clear);
1599 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1601 struct net *net = sock_net(skb->sk);
1602 struct ndmsg *ndm;
1603 struct nlattr *dst_attr;
1604 struct neigh_table *tbl;
1605 struct net_device *dev = NULL;
1606 int err = -EINVAL;
1608 ASSERT_RTNL();
1609 if (nlmsg_len(nlh) < sizeof(*ndm))
1610 goto out;
1612 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1613 if (dst_attr == NULL)
1614 goto out;
1616 ndm = nlmsg_data(nlh);
1617 if (ndm->ndm_ifindex) {
1618 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1619 if (dev == NULL) {
1620 err = -ENODEV;
1621 goto out;
1625 read_lock(&neigh_tbl_lock);
1626 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1627 struct neighbour *neigh;
1629 if (tbl->family != ndm->ndm_family)
1630 continue;
1631 read_unlock(&neigh_tbl_lock);
1633 if (nla_len(dst_attr) < tbl->key_len)
1634 goto out;
1636 if (ndm->ndm_flags & NTF_PROXY) {
1637 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1638 goto out;
1641 if (dev == NULL)
1642 goto out;
1644 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1645 if (neigh == NULL) {
1646 err = -ENOENT;
1647 goto out;
1650 err = neigh_update(neigh, NULL, NUD_FAILED,
1651 NEIGH_UPDATE_F_OVERRIDE |
1652 NEIGH_UPDATE_F_ADMIN);
1653 neigh_release(neigh);
1654 goto out;
1656 read_unlock(&neigh_tbl_lock);
1657 err = -EAFNOSUPPORT;
1659 out:
1660 return err;
1663 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1665 struct net *net = sock_net(skb->sk);
1666 struct ndmsg *ndm;
1667 struct nlattr *tb[NDA_MAX+1];
1668 struct neigh_table *tbl;
1669 struct net_device *dev = NULL;
1670 int err;
1672 ASSERT_RTNL();
1673 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1674 if (err < 0)
1675 goto out;
1677 err = -EINVAL;
1678 if (tb[NDA_DST] == NULL)
1679 goto out;
1681 ndm = nlmsg_data(nlh);
1682 if (ndm->ndm_ifindex) {
1683 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1684 if (dev == NULL) {
1685 err = -ENODEV;
1686 goto out;
1689 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1690 goto out;
1693 read_lock(&neigh_tbl_lock);
1694 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1695 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1696 struct neighbour *neigh;
1697 void *dst, *lladdr;
1699 if (tbl->family != ndm->ndm_family)
1700 continue;
1701 read_unlock(&neigh_tbl_lock);
1703 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1704 goto out;
1705 dst = nla_data(tb[NDA_DST]);
1706 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1708 if (ndm->ndm_flags & NTF_PROXY) {
1709 struct pneigh_entry *pn;
1711 err = -ENOBUFS;
1712 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1713 if (pn) {
1714 pn->flags = ndm->ndm_flags;
1715 err = 0;
1717 goto out;
1720 if (dev == NULL)
1721 goto out;
1723 neigh = neigh_lookup(tbl, dst, dev);
1724 if (neigh == NULL) {
1725 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1726 err = -ENOENT;
1727 goto out;
1730 neigh = __neigh_lookup_errno(tbl, dst, dev);
1731 if (IS_ERR(neigh)) {
1732 err = PTR_ERR(neigh);
1733 goto out;
1735 } else {
1736 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1737 err = -EEXIST;
1738 neigh_release(neigh);
1739 goto out;
1742 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1743 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1746 if (ndm->ndm_flags & NTF_USE) {
1747 neigh_event_send(neigh, NULL);
1748 err = 0;
1749 } else
1750 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1751 neigh_release(neigh);
1752 goto out;
1755 read_unlock(&neigh_tbl_lock);
1756 err = -EAFNOSUPPORT;
1757 out:
1758 return err;
1761 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1763 struct nlattr *nest;
1765 nest = nla_nest_start(skb, NDTA_PARMS);
1766 if (nest == NULL)
1767 return -ENOBUFS;
1769 if (parms->dev)
1770 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1772 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1773 NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
1774 /* approximative value for deprecated QUEUE_LEN (in packets) */
1775 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
1776 DIV_ROUND_UP(parms->queue_len_bytes,
1777 SKB_TRUESIZE(ETH_FRAME_LEN)));
1778 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1779 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1780 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1781 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1782 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1783 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1784 parms->base_reachable_time);
1785 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1786 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1787 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1788 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1789 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1790 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1792 return nla_nest_end(skb, nest);
1794 nla_put_failure:
1795 nla_nest_cancel(skb, nest);
1796 return -EMSGSIZE;
1799 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1800 u32 pid, u32 seq, int type, int flags)
1802 struct nlmsghdr *nlh;
1803 struct ndtmsg *ndtmsg;
1805 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1806 if (nlh == NULL)
1807 return -EMSGSIZE;
1809 ndtmsg = nlmsg_data(nlh);
1811 read_lock_bh(&tbl->lock);
1812 ndtmsg->ndtm_family = tbl->family;
1813 ndtmsg->ndtm_pad1 = 0;
1814 ndtmsg->ndtm_pad2 = 0;
1816 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1817 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1818 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1819 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1820 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1823 unsigned long now = jiffies;
1824 unsigned int flush_delta = now - tbl->last_flush;
1825 unsigned int rand_delta = now - tbl->last_rand;
1826 struct neigh_hash_table *nht;
1827 struct ndt_config ndc = {
1828 .ndtc_key_len = tbl->key_len,
1829 .ndtc_entry_size = tbl->entry_size,
1830 .ndtc_entries = atomic_read(&tbl->entries),
1831 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1832 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1833 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1836 rcu_read_lock_bh();
1837 nht = rcu_dereference_bh(tbl->nht);
1838 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1839 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1840 rcu_read_unlock_bh();
1842 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1846 int cpu;
1847 struct ndt_stats ndst;
1849 memset(&ndst, 0, sizeof(ndst));
1851 for_each_possible_cpu(cpu) {
1852 struct neigh_statistics *st;
1854 st = per_cpu_ptr(tbl->stats, cpu);
1855 ndst.ndts_allocs += st->allocs;
1856 ndst.ndts_destroys += st->destroys;
1857 ndst.ndts_hash_grows += st->hash_grows;
1858 ndst.ndts_res_failed += st->res_failed;
1859 ndst.ndts_lookups += st->lookups;
1860 ndst.ndts_hits += st->hits;
1861 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1862 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1863 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1864 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1867 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1870 BUG_ON(tbl->parms.dev);
1871 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1872 goto nla_put_failure;
1874 read_unlock_bh(&tbl->lock);
1875 return nlmsg_end(skb, nlh);
1877 nla_put_failure:
1878 read_unlock_bh(&tbl->lock);
1879 nlmsg_cancel(skb, nlh);
1880 return -EMSGSIZE;
1883 static int neightbl_fill_param_info(struct sk_buff *skb,
1884 struct neigh_table *tbl,
1885 struct neigh_parms *parms,
1886 u32 pid, u32 seq, int type,
1887 unsigned int flags)
1889 struct ndtmsg *ndtmsg;
1890 struct nlmsghdr *nlh;
1892 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1893 if (nlh == NULL)
1894 return -EMSGSIZE;
1896 ndtmsg = nlmsg_data(nlh);
1898 read_lock_bh(&tbl->lock);
1899 ndtmsg->ndtm_family = tbl->family;
1900 ndtmsg->ndtm_pad1 = 0;
1901 ndtmsg->ndtm_pad2 = 0;
1903 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1904 neightbl_fill_parms(skb, parms) < 0)
1905 goto errout;
1907 read_unlock_bh(&tbl->lock);
1908 return nlmsg_end(skb, nlh);
1909 errout:
1910 read_unlock_bh(&tbl->lock);
1911 nlmsg_cancel(skb, nlh);
1912 return -EMSGSIZE;
1915 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1916 [NDTA_NAME] = { .type = NLA_STRING },
1917 [NDTA_THRESH1] = { .type = NLA_U32 },
1918 [NDTA_THRESH2] = { .type = NLA_U32 },
1919 [NDTA_THRESH3] = { .type = NLA_U32 },
1920 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1921 [NDTA_PARMS] = { .type = NLA_NESTED },
1924 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1925 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1926 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1927 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1928 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1929 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1930 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1931 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1932 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1933 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1934 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1935 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1936 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1937 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1940 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1942 struct net *net = sock_net(skb->sk);
1943 struct neigh_table *tbl;
1944 struct ndtmsg *ndtmsg;
1945 struct nlattr *tb[NDTA_MAX+1];
1946 int err;
1948 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1949 nl_neightbl_policy);
1950 if (err < 0)
1951 goto errout;
1953 if (tb[NDTA_NAME] == NULL) {
1954 err = -EINVAL;
1955 goto errout;
1958 ndtmsg = nlmsg_data(nlh);
1959 read_lock(&neigh_tbl_lock);
1960 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1961 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1962 continue;
1964 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1965 break;
1968 if (tbl == NULL) {
1969 err = -ENOENT;
1970 goto errout_locked;
1974 * We acquire tbl->lock to be nice to the periodic timers and
1975 * make sure they always see a consistent set of values.
1977 write_lock_bh(&tbl->lock);
1979 if (tb[NDTA_PARMS]) {
1980 struct nlattr *tbp[NDTPA_MAX+1];
1981 struct neigh_parms *p;
1982 int i, ifindex = 0;
1984 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1985 nl_ntbl_parm_policy);
1986 if (err < 0)
1987 goto errout_tbl_lock;
1989 if (tbp[NDTPA_IFINDEX])
1990 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1992 p = lookup_neigh_parms(tbl, net, ifindex);
1993 if (p == NULL) {
1994 err = -ENOENT;
1995 goto errout_tbl_lock;
1998 for (i = 1; i <= NDTPA_MAX; i++) {
1999 if (tbp[i] == NULL)
2000 continue;
2002 switch (i) {
2003 case NDTPA_QUEUE_LEN:
2004 p->queue_len_bytes = nla_get_u32(tbp[i]) *
2005 SKB_TRUESIZE(ETH_FRAME_LEN);
2006 break;
2007 case NDTPA_QUEUE_LENBYTES:
2008 p->queue_len_bytes = nla_get_u32(tbp[i]);
2009 break;
2010 case NDTPA_PROXY_QLEN:
2011 p->proxy_qlen = nla_get_u32(tbp[i]);
2012 break;
2013 case NDTPA_APP_PROBES:
2014 p->app_probes = nla_get_u32(tbp[i]);
2015 break;
2016 case NDTPA_UCAST_PROBES:
2017 p->ucast_probes = nla_get_u32(tbp[i]);
2018 break;
2019 case NDTPA_MCAST_PROBES:
2020 p->mcast_probes = nla_get_u32(tbp[i]);
2021 break;
2022 case NDTPA_BASE_REACHABLE_TIME:
2023 p->base_reachable_time = nla_get_msecs(tbp[i]);
2024 break;
2025 case NDTPA_GC_STALETIME:
2026 p->gc_staletime = nla_get_msecs(tbp[i]);
2027 break;
2028 case NDTPA_DELAY_PROBE_TIME:
2029 p->delay_probe_time = nla_get_msecs(tbp[i]);
2030 break;
2031 case NDTPA_RETRANS_TIME:
2032 p->retrans_time = nla_get_msecs(tbp[i]);
2033 break;
2034 case NDTPA_ANYCAST_DELAY:
2035 p->anycast_delay = nla_get_msecs(tbp[i]);
2036 break;
2037 case NDTPA_PROXY_DELAY:
2038 p->proxy_delay = nla_get_msecs(tbp[i]);
2039 break;
2040 case NDTPA_LOCKTIME:
2041 p->locktime = nla_get_msecs(tbp[i]);
2042 break;
2047 if (tb[NDTA_THRESH1])
2048 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2050 if (tb[NDTA_THRESH2])
2051 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2053 if (tb[NDTA_THRESH3])
2054 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2056 if (tb[NDTA_GC_INTERVAL])
2057 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2059 err = 0;
2061 errout_tbl_lock:
2062 write_unlock_bh(&tbl->lock);
2063 errout_locked:
2064 read_unlock(&neigh_tbl_lock);
2065 errout:
2066 return err;
2069 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2071 struct net *net = sock_net(skb->sk);
2072 int family, tidx, nidx = 0;
2073 int tbl_skip = cb->args[0];
2074 int neigh_skip = cb->args[1];
2075 struct neigh_table *tbl;
2077 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2079 read_lock(&neigh_tbl_lock);
2080 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2081 struct neigh_parms *p;
2083 if (tidx < tbl_skip || (family && tbl->family != family))
2084 continue;
2086 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2087 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2088 NLM_F_MULTI) <= 0)
2089 break;
2091 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2092 if (!net_eq(neigh_parms_net(p), net))
2093 continue;
2095 if (nidx < neigh_skip)
2096 goto next;
2098 if (neightbl_fill_param_info(skb, tbl, p,
2099 NETLINK_CB(cb->skb).pid,
2100 cb->nlh->nlmsg_seq,
2101 RTM_NEWNEIGHTBL,
2102 NLM_F_MULTI) <= 0)
2103 goto out;
2104 next:
2105 nidx++;
2108 neigh_skip = 0;
2110 out:
2111 read_unlock(&neigh_tbl_lock);
2112 cb->args[0] = tidx;
2113 cb->args[1] = nidx;
2115 return skb->len;
2118 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2119 u32 pid, u32 seq, int type, unsigned int flags)
2121 unsigned long now = jiffies;
2122 struct nda_cacheinfo ci;
2123 struct nlmsghdr *nlh;
2124 struct ndmsg *ndm;
2126 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2127 if (nlh == NULL)
2128 return -EMSGSIZE;
2130 ndm = nlmsg_data(nlh);
2131 ndm->ndm_family = neigh->ops->family;
2132 ndm->ndm_pad1 = 0;
2133 ndm->ndm_pad2 = 0;
2134 ndm->ndm_flags = neigh->flags;
2135 ndm->ndm_type = neigh->type;
2136 ndm->ndm_ifindex = neigh->dev->ifindex;
2138 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2140 read_lock_bh(&neigh->lock);
2141 ndm->ndm_state = neigh->nud_state;
2142 if (neigh->nud_state & NUD_VALID) {
2143 char haddr[MAX_ADDR_LEN];
2145 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2146 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2147 read_unlock_bh(&neigh->lock);
2148 goto nla_put_failure;
2152 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2153 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2154 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2155 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2156 read_unlock_bh(&neigh->lock);
2158 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2159 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2161 return nlmsg_end(skb, nlh);
2163 nla_put_failure:
2164 nlmsg_cancel(skb, nlh);
2165 return -EMSGSIZE;
2168 static void neigh_update_notify(struct neighbour *neigh)
2170 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2171 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = 0; h < (1 << nht->hash_shift); h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (!net_eq(dev_net(n->dev), net))
				continue;
			if (idx < s_idx)
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}

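/*
 * Minimal userspace sketch (not part of this file) of the dump request
 * that neigh_dump_info() serves; "request_neigh_dump" is illustrative
 * only and error handling is elided.  Deliberately compiled out.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int request_neigh_dump(int fd)	/* fd: AF_NETLINK/NETLINK_ROUTE socket */
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.g));
	req.nlh.nlmsg_type  = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.g.rtgen_family  = AF_UNSPEC; /* 0 = all families, as checked above */

	/* the reply arrives as NLM_F_MULTI messages ending in NLMSG_DONE */
	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
#endif
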
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);

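/*
 * Hedged usage sketch for neigh_for_each(): the callback runs with
 * tbl->lock read-held under BH-disabled RCU, so it must not sleep or
 * reacquire table locks.  "count_valid"/"tbl_count_valid" are
 * hypothetical names, not helpers defined elsewhere in the tree.
 */
#if 0
static void count_valid(struct neighbour *n, void *cookie)
{
	int *nr = cookie;

	if (n->nud_state & NUD_VALID)
		(*nr)++;
}

static int tbl_count_valid(struct neigh_table *tbl)
{
	int nr = 0;

	neigh_for_each(tbl, count_valid, &nr);
	return nr;
}
#endif
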
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);

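/*
 * Sketch of a caller honouring the locking contract stated above;
 * "flush_all" releasing every entry unconditionally is illustrative
 * only (real users, such as the ifdown path, test per-entry state
 * instead of always returning nonzero).
 */
#if 0
static int flush_all(struct neighbour *n)
{
	return 1;	/* nonzero: unlink the entry and release it */
}

static void neigh_table_flush(struct neigh_table *tbl)
{
	write_lock_bh(&tbl->lock);
	__neigh_for_each_release(tbl, flush_all);
	write_unlock_bh(&tbl->lock);
}
#endif
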
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

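/*
 * How a protocol typically wires these exported iterators into its own
 * /proc seq_file, modeled loosely on net/ipv4/arp.c; "proto_tbl" and
 * "proto_seq_show" are placeholders, not symbols defined here.
 */
#if 0
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* skip NUD_NOARP entries so magic/noarp neighbours stay hidden */
	return neigh_seq_start(seq, pos, &proto_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations proto_seq_ops = {
	.start = proto_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = proto_seq_show,	/* protocol-specific line formatter */
};
#endif
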
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}

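/*
 * Sizing note: both address attributes are reserved at MAX_ADDR_LEN,
 * an upper bound on tbl->key_len and dev->addr_len, so a skb sized by
 * this helper cannot overflow in neigh_fill_info(); __neigh_notify()
 * below treats -EMSGSIZE as a bug for exactly that reason.
 */
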
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#endif /* CONFIG_ARPD */

#ifdef CONFIG_SYSCTL

static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
			   size_t *lenp, loff_t *ppos)
{
	int size, ret;
	ctl_table tmp = *ctl;

	tmp.data = &size;
	size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

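/*
 * Worked example (illustrative; SKB_TRUESIZE(ETH_FRAME_LEN) depends on
 * struct layout and config): writing unres_qlen = 3 stores
 * 3 * SKB_TRUESIZE(ETH_FRAME_LEN) into the shared queue_len_bytes, and
 * reads divide back with DIV_ROUND_UP, so the legacy packet-count knob
 * stays an alias of the byte-based limit (note NEIGH_VAR_QUEUE_LEN and
 * NEIGH_VAR_QUEUE_LEN_BYTES both point at queue_len_bytes below).
 */
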
enum {
	NEIGH_VAR_MCAST_PROBE,
	NEIGH_VAR_UCAST_PROBE,
	NEIGH_VAR_APP_PROBE,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
	NEIGH_VAR_RETRANS_TIME_MS,
	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
	char *dev_name;
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		[NEIGH_VAR_MCAST_PROBE] = {
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_UCAST_PROBE] = {
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_APP_PROBE] = {
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_RETRANS_TIME] = {
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_DELAY_PROBE_TIME] = {
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_STALETIME] = {
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_QUEUE_LEN] = {
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_unres_qlen,
		},
		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
			.procname	= "unres_qlen_bytes",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_PROXY_QLEN] = {
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_ANYCAST_DELAY] = {
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_PROXY_DELAY] = {
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_LOCKTIME] = {
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_RETRANS_TIME_MS] = {
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
		},
		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
		},
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
	},
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  char *p_name, proc_handler *handler)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;

#define NEIGH_CTL_PATH_ROOT	0
#define NEIGH_CTL_PATH_PROTO	1
#define NEIGH_CTL_PATH_NEIGH	2
#define NEIGH_CTL_PATH_DEV	3

	struct ctl_path neigh_path[] = {
		{ .procname = "net",	 },
		{ .procname = "proto",	 },
		{ .procname = "neigh",	 },
		{ .procname = "default", },
		{ },
	};

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
	t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
	}

	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;

	t->sysctl_header =
		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free_procname;

	p->sysctl_table = t;
	return 0;

free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

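/*
 * Registration sketch, modeled on how ARP sets up its default tree in
 * net/ipv4/arp.c (shown only as an assumption-labelled example): with
 * dev == NULL the gc_* knobs survive and the sysctl path becomes
 * net.<proto>.neigh.default.*:
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
 *
 * Per-device registration passes the device instead, which truncates
 * the table at NEIGH_VAR_GC_INTERVAL as done above.
 */
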
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->dev_name);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);