/*
 *	Generic address resolution entity
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static const struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by the reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  The same lock is also used to protect other entry
   fields (e.g. the timer and the resolution queue).

   Again, nothing clever shall be done under neigh->lock; the most
   complicated operation we allow there is dev->hard_header.  It is
   assumed that dev->hard_header is simple and does not call back
   into the neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting
   the list of neighbour tables.  This list is used only in process
   context.
 */
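
/*
 * A minimal sketch (not part of the original file) of the discipline
 * described above: take a reference while tbl->lock is held, drop the
 * lock, and only then do non-trivial work on the entry.  The helper name
 * is hypothetical; neigh_hold()/neigh_release() are the refcount
 * primitives from <net/neighbour.h>.
 */
static inline void example_use_entry(struct neigh_table *tbl, struct neighbour *n)
{
	read_lock_bh(&tbl->lock);
	neigh_hold(n);			/* the reference keeps the entry alive */
	read_unlock_bh(&tbl->lock);	/* never transmit or call back under tbl->lock */

	/* ... non-trivial work on 'n' goes here ... */

	neigh_release(n);		/* drop the reference when done */
}
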
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * Returns a value uniformly distributed in the interval
 * (1/2)*base ... (3/2)*base.  This matches the default IPv6 setting and
 * is not overridable, because it is a reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
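
/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * re-randomize a table's reachable_time from its configured base.
 * With base_reachable_time = 30 * HZ the result lies in [15*HZ, 45*HZ).
 */
static inline void example_refresh_reachable_time(struct neigh_table *tbl)
{
	tbl->parms.reachable_time =
			neigh_rand_reach_time(tbl->parms.base_reachable_time);
}
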
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    (net == n->dev->nd_net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->net == net) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->net = hold_net(net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
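
/*
 * Illustration (hypothetical standalone helper, not part of the original
 * file): the proxy hash above folds the last four bytes of the key down
 * to the 4-bit bucket index used for phash_buckets[].
 */
static inline u32 example_pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	return hash_val & PNEIGH_HASHMASK;	/* 0..15 */
}
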
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    (n->net == net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(n->net);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(n->net);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable the fast path.

   Called with neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
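
/*
 * Illustration with hypothetical values (not from this file): with
 * ucast_probes = 3, app_probes = 0 and mcast_probes = 3, an entry in
 * NUD_PROBE gives up after 3 unicast probes, while initial resolution
 * (NUD_INCOMPLETE) may send up to 3 + 0 + 3 = 6 probes before the entry
 * is marked NUD_FAILED by the timer handler below.
 */
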
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: report_unreachable is a complicated
		   routine and can even hit this same neighbour entry, so take
		   care to avoid an endless loop here. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
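
/*
 * For context (a sketch, assuming the usual static inline wrapper in
 * <net/neighbour.h>, which is not part of this file): callers normally go
 * through neigh_event_send(), which only falls into __neigh_event_send()
 * when the entry is not already CONNECTED/DELAY/PROBE, e.g.:
 *
 *	if (neigh_event_send(neigh, skb) == 0)
 *		... address is usable, build the header and transmit ...
 */
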
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if the new one is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if the new one is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   Caller MUST hold a reference count on the entry.
 */
942 int neigh_update(struct neighbour
*neigh
, const u8
*lladdr
, u8
new,
948 struct net_device
*dev
;
949 int update_isrouter
= 0;
951 write_lock_bh(&neigh
->lock
);
954 old
= neigh
->nud_state
;
957 if (!(flags
& NEIGH_UPDATE_F_ADMIN
) &&
958 (old
& (NUD_NOARP
| NUD_PERMANENT
)))
961 if (!(new & NUD_VALID
)) {
962 neigh_del_timer(neigh
);
963 if (old
& NUD_CONNECTED
)
964 neigh_suspect(neigh
);
965 neigh
->nud_state
= new;
967 notify
= old
& NUD_VALID
;
971 /* Compare new lladdr with cached one */
972 if (!dev
->addr_len
) {
973 /* First case: device needs no address. */
976 /* The second case: if something is already cached
977 and a new address is proposed:
979 - if they are different, check override flag
981 if ((old
& NUD_VALID
) &&
982 !memcmp(lladdr
, neigh
->ha
, dev
->addr_len
))
985 /* No address is supplied; if we know something,
986 use it, otherwise discard the request.
989 if (!(old
& NUD_VALID
))
994 if (new & NUD_CONNECTED
)
995 neigh
->confirmed
= jiffies
;
996 neigh
->updated
= jiffies
;
998 /* If entry was valid and address is not changed,
999 do not change entry state, if new one is STALE.
1002 update_isrouter
= flags
& NEIGH_UPDATE_F_OVERRIDE_ISROUTER
;
1003 if (old
& NUD_VALID
) {
1004 if (lladdr
!= neigh
->ha
&& !(flags
& NEIGH_UPDATE_F_OVERRIDE
)) {
1005 update_isrouter
= 0;
1006 if ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) &&
1007 (old
& NUD_CONNECTED
)) {
1013 if (lladdr
== neigh
->ha
&& new == NUD_STALE
&&
1014 ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) ||
1015 (old
& NUD_CONNECTED
))
1022 neigh_del_timer(neigh
);
1023 if (new & NUD_IN_TIMER
)
1024 neigh_add_timer(neigh
, (jiffies
+
1025 ((new & NUD_REACHABLE
) ?
1026 neigh
->parms
->reachable_time
:
1028 neigh
->nud_state
= new;
1031 if (lladdr
!= neigh
->ha
) {
1032 memcpy(&neigh
->ha
, lladdr
, dev
->addr_len
);
1033 neigh_update_hhs(neigh
);
1034 if (!(new & NUD_CONNECTED
))
1035 neigh
->confirmed
= jiffies
-
1036 (neigh
->parms
->base_reachable_time
<< 1);
1041 if (new & NUD_CONNECTED
)
1042 neigh_connect(neigh
);
1044 neigh_suspect(neigh
);
1045 if (!(old
& NUD_VALID
)) {
1046 struct sk_buff
*skb
;
1048 /* Again: avoid dead loop if something went wrong */
1050 while (neigh
->nud_state
& NUD_VALID
&&
1051 (skb
= __skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
1052 struct neighbour
*n1
= neigh
;
1053 write_unlock_bh(&neigh
->lock
);
1054 /* On shaper/eql skb->dst->neighbour != neigh :( */
1055 if (skb
->dst
&& skb
->dst
->neighbour
)
1056 n1
= skb
->dst
->neighbour
;
1058 write_lock_bh(&neigh
->lock
);
1060 skb_queue_purge(&neigh
->arp_queue
);
1063 if (update_isrouter
) {
1064 neigh
->flags
= (flags
& NEIGH_UPDATE_F_ISROUTER
) ?
1065 (neigh
->flags
| NTF_ROUTER
) :
1066 (neigh
->flags
& ~NTF_ROUTER
);
1068 write_unlock_bh(&neigh
->lock
);
1071 neigh_update_notify(neigh
);
1076 struct neighbour
*neigh_event_ns(struct neigh_table
*tbl
,
1077 u8
*lladdr
, void *saddr
,
1078 struct net_device
*dev
)
1080 struct neighbour
*neigh
= __neigh_lookup(tbl
, saddr
, dev
,
1081 lladdr
|| !dev
->addr_len
);
1083 neigh_update(neigh
, lladdr
, NUD_STALE
,
1084 NEIGH_UPDATE_F_OVERRIDE
);
1088 static void neigh_hh_init(struct neighbour
*n
, struct dst_entry
*dst
,
1091 struct hh_cache
*hh
;
1092 struct net_device
*dev
= dst
->dev
;
1094 for (hh
= n
->hh
; hh
; hh
= hh
->hh_next
)
1095 if (hh
->hh_type
== protocol
)
1098 if (!hh
&& (hh
= kzalloc(sizeof(*hh
), GFP_ATOMIC
)) != NULL
) {
1099 seqlock_init(&hh
->hh_lock
);
1100 hh
->hh_type
= protocol
;
1101 atomic_set(&hh
->hh_refcnt
, 0);
1104 if (dev
->header_ops
->cache(n
, hh
)) {
1108 atomic_inc(&hh
->hh_refcnt
);
1109 hh
->hh_next
= n
->hh
;
1111 if (n
->nud_state
& NUD_CONNECTED
)
1112 hh
->hh_output
= n
->ops
->hh_output
;
1114 hh
->hh_output
= n
->ops
->output
;
1118 atomic_inc(&hh
->hh_refcnt
);
1123 /* This function can be used in contexts, where only old dev_queue_xmit
1124 worked, f.e. if you want to override normal output path (eql, shaper),
1125 but resolution is not made yet.
1128 int neigh_compat_output(struct sk_buff
*skb
)
1130 struct net_device
*dev
= skb
->dev
;
1132 __skb_pull(skb
, skb_network_offset(skb
));
1134 if (dev_hard_header(skb
, dev
, ntohs(skb
->protocol
), NULL
, NULL
,
1136 dev
->header_ops
->rebuild(skb
))
1139 return dev_queue_xmit(skb
);
1142 /* Slow and careful. */
1144 int neigh_resolve_output(struct sk_buff
*skb
)
1146 struct dst_entry
*dst
= skb
->dst
;
1147 struct neighbour
*neigh
;
1150 if (!dst
|| !(neigh
= dst
->neighbour
))
1153 __skb_pull(skb
, skb_network_offset(skb
));
1155 if (!neigh_event_send(neigh
, skb
)) {
1157 struct net_device
*dev
= neigh
->dev
;
1158 if (dev
->header_ops
->cache
&& !dst
->hh
) {
1159 write_lock_bh(&neigh
->lock
);
1161 neigh_hh_init(neigh
, dst
, dst
->ops
->protocol
);
1162 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1163 neigh
->ha
, NULL
, skb
->len
);
1164 write_unlock_bh(&neigh
->lock
);
1166 read_lock_bh(&neigh
->lock
);
1167 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1168 neigh
->ha
, NULL
, skb
->len
);
1169 read_unlock_bh(&neigh
->lock
);
1172 rc
= neigh
->ops
->queue_xmit(skb
);
1179 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1180 dst
, dst
? dst
->neighbour
: NULL
);
1187 /* As fast as possible without hh cache */
1189 int neigh_connected_output(struct sk_buff
*skb
)
1192 struct dst_entry
*dst
= skb
->dst
;
1193 struct neighbour
*neigh
= dst
->neighbour
;
1194 struct net_device
*dev
= neigh
->dev
;
1196 __skb_pull(skb
, skb_network_offset(skb
));
1198 read_lock_bh(&neigh
->lock
);
1199 err
= dev_hard_header(skb
, dev
, ntohs(skb
->protocol
),
1200 neigh
->ha
, NULL
, skb
->len
);
1201 read_unlock_bh(&neigh
->lock
);
1203 err
= neigh
->ops
->queue_xmit(skb
);
1211 static void neigh_proxy_process(unsigned long arg
)
1213 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
1214 long sched_next
= 0;
1215 unsigned long now
= jiffies
;
1216 struct sk_buff
*skb
;
1218 spin_lock(&tbl
->proxy_queue
.lock
);
1220 skb
= tbl
->proxy_queue
.next
;
1222 while (skb
!= (struct sk_buff
*)&tbl
->proxy_queue
) {
1223 struct sk_buff
*back
= skb
;
1224 long tdif
= NEIGH_CB(back
)->sched_next
- now
;
1228 struct net_device
*dev
= back
->dev
;
1229 __skb_unlink(back
, &tbl
->proxy_queue
);
1230 if (tbl
->proxy_redo
&& netif_running(dev
))
1231 tbl
->proxy_redo(back
);
1236 } else if (!sched_next
|| tdif
< sched_next
)
1239 del_timer(&tbl
->proxy_timer
);
1241 mod_timer(&tbl
->proxy_timer
, jiffies
+ sched_next
);
1242 spin_unlock(&tbl
->proxy_queue
.lock
);
1245 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
1246 struct sk_buff
*skb
)
1248 unsigned long now
= jiffies
;
1249 unsigned long sched_next
= now
+ (net_random() % p
->proxy_delay
);
1251 if (tbl
->proxy_queue
.qlen
> p
->proxy_qlen
) {
1256 NEIGH_CB(skb
)->sched_next
= sched_next
;
1257 NEIGH_CB(skb
)->flags
|= LOCALLY_ENQUEUED
;
1259 spin_lock(&tbl
->proxy_queue
.lock
);
1260 if (del_timer(&tbl
->proxy_timer
)) {
1261 if (time_before(tbl
->proxy_timer
.expires
, sched_next
))
1262 sched_next
= tbl
->proxy_timer
.expires
;
1264 dst_release(skb
->dst
);
1267 __skb_queue_tail(&tbl
->proxy_queue
, skb
);
1268 mod_timer(&tbl
->proxy_timer
, sched_next
);
1269 spin_unlock(&tbl
->proxy_queue
.lock
);
1272 static inline struct neigh_parms
*lookup_neigh_params(struct neigh_table
*tbl
,
1273 struct net
*net
, int ifindex
)
1275 struct neigh_parms
*p
;
1277 for (p
= &tbl
->parms
; p
; p
= p
->next
) {
1280 if ((p
->dev
&& p
->dev
->ifindex
== ifindex
) ||
1281 (!p
->dev
&& !ifindex
))
1288 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
,
1289 struct neigh_table
*tbl
)
1291 struct neigh_parms
*p
, *ref
;
1298 ref
= lookup_neigh_params(tbl
, net
, 0);
1302 p
= kmemdup(ref
, sizeof(*p
), GFP_KERNEL
);
1305 atomic_set(&p
->refcnt
, 1);
1306 INIT_RCU_HEAD(&p
->rcu_head
);
1308 neigh_rand_reach_time(p
->base_reachable_time
);
1310 if (dev
->neigh_setup
&& dev
->neigh_setup(dev
, p
)) {
1318 p
->net
= hold_net(net
);
1319 p
->sysctl_table
= NULL
;
1320 write_lock_bh(&tbl
->lock
);
1321 p
->next
= tbl
->parms
.next
;
1322 tbl
->parms
.next
= p
;
1323 write_unlock_bh(&tbl
->lock
);
1328 static void neigh_rcu_free_parms(struct rcu_head
*head
)
1330 struct neigh_parms
*parms
=
1331 container_of(head
, struct neigh_parms
, rcu_head
);
1333 neigh_parms_put(parms
);
1336 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
)
1338 struct neigh_parms
**p
;
1340 if (!parms
|| parms
== &tbl
->parms
)
1342 write_lock_bh(&tbl
->lock
);
1343 for (p
= &tbl
->parms
.next
; *p
; p
= &(*p
)->next
) {
1347 write_unlock_bh(&tbl
->lock
);
1349 dev_put(parms
->dev
);
1350 call_rcu(&parms
->rcu_head
, neigh_rcu_free_parms
);
1354 write_unlock_bh(&tbl
->lock
);
1355 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1358 void neigh_parms_destroy(struct neigh_parms
*parms
)
1360 release_net(parms
->net
);
1364 static struct lock_class_key neigh_table_proxy_queue_class
;
1366 void neigh_table_init_no_netlink(struct neigh_table
*tbl
)
1368 unsigned long now
= jiffies
;
1369 unsigned long phsize
;
1371 tbl
->parms
.net
= &init_net
;
1372 atomic_set(&tbl
->parms
.refcnt
, 1);
1373 INIT_RCU_HEAD(&tbl
->parms
.rcu_head
);
1374 tbl
->parms
.reachable_time
=
1375 neigh_rand_reach_time(tbl
->parms
.base_reachable_time
);
1377 if (!tbl
->kmem_cachep
)
1379 kmem_cache_create(tbl
->id
, tbl
->entry_size
, 0,
1380 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
1382 tbl
->stats
= alloc_percpu(struct neigh_statistics
);
1384 panic("cannot create neighbour cache statistics");
1386 #ifdef CONFIG_PROC_FS
1387 tbl
->pde
= create_proc_entry(tbl
->id
, 0, init_net
.proc_net_stat
);
1389 panic("cannot create neighbour proc dir entry");
1390 tbl
->pde
->proc_fops
= &neigh_stat_seq_fops
;
1391 tbl
->pde
->data
= tbl
;
1395 tbl
->hash_buckets
= neigh_hash_alloc(tbl
->hash_mask
+ 1);
1397 phsize
= (PNEIGH_HASHMASK
+ 1) * sizeof(struct pneigh_entry
*);
1398 tbl
->phash_buckets
= kzalloc(phsize
, GFP_KERNEL
);
1400 if (!tbl
->hash_buckets
|| !tbl
->phash_buckets
)
1401 panic("cannot allocate neighbour cache hashes");
1403 get_random_bytes(&tbl
->hash_rnd
, sizeof(tbl
->hash_rnd
));
1405 rwlock_init(&tbl
->lock
);
1406 setup_timer(&tbl
->gc_timer
, neigh_periodic_timer
, (unsigned long)tbl
);
1407 tbl
->gc_timer
.expires
= now
+ 1;
1408 add_timer(&tbl
->gc_timer
);
1410 setup_timer(&tbl
->proxy_timer
, neigh_proxy_process
, (unsigned long)tbl
);
1411 skb_queue_head_init_class(&tbl
->proxy_queue
,
1412 &neigh_table_proxy_queue_class
);
1414 tbl
->last_flush
= now
;
1415 tbl
->last_rand
= now
+ tbl
->parms
.reachable_time
* 20;
1418 void neigh_table_init(struct neigh_table
*tbl
)
1420 struct neigh_table
*tmp
;
1422 neigh_table_init_no_netlink(tbl
);
1423 write_lock(&neigh_tbl_lock
);
1424 for (tmp
= neigh_tables
; tmp
; tmp
= tmp
->next
) {
1425 if (tmp
->family
== tbl
->family
)
1428 tbl
->next
= neigh_tables
;
1430 write_unlock(&neigh_tbl_lock
);
1432 if (unlikely(tmp
)) {
1433 printk(KERN_ERR
"NEIGH: Registering multiple tables for "
1434 "family %d\n", tbl
->family
);
1439 int neigh_table_clear(struct neigh_table
*tbl
)
1441 struct neigh_table
**tp
;
1443 /* It is not clean... Fix it to unload IPv6 module safely */
1444 del_timer_sync(&tbl
->gc_timer
);
1445 del_timer_sync(&tbl
->proxy_timer
);
1446 pneigh_queue_purge(&tbl
->proxy_queue
);
1447 neigh_ifdown(tbl
, NULL
);
1448 if (atomic_read(&tbl
->entries
))
1449 printk(KERN_CRIT
"neighbour leakage\n");
1450 write_lock(&neigh_tbl_lock
);
1451 for (tp
= &neigh_tables
; *tp
; tp
= &(*tp
)->next
) {
1457 write_unlock(&neigh_tbl_lock
);
1459 neigh_hash_free(tbl
->hash_buckets
, tbl
->hash_mask
+ 1);
1460 tbl
->hash_buckets
= NULL
;
1462 kfree(tbl
->phash_buckets
);
1463 tbl
->phash_buckets
= NULL
;
1465 remove_proc_entry(tbl
->id
, init_net
.proc_net_stat
);
1467 free_percpu(tbl
->stats
);
1470 kmem_cache_destroy(tbl
->kmem_cachep
);
1471 tbl
->kmem_cachep
= NULL
;
1476 static int neigh_delete(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1478 struct net
*net
= skb
->sk
->sk_net
;
1480 struct nlattr
*dst_attr
;
1481 struct neigh_table
*tbl
;
1482 struct net_device
*dev
= NULL
;
1485 if (nlmsg_len(nlh
) < sizeof(*ndm
))
1488 dst_attr
= nlmsg_find_attr(nlh
, sizeof(*ndm
), NDA_DST
);
1489 if (dst_attr
== NULL
)
1492 ndm
= nlmsg_data(nlh
);
1493 if (ndm
->ndm_ifindex
) {
1494 dev
= dev_get_by_index(net
, ndm
->ndm_ifindex
);
1501 read_lock(&neigh_tbl_lock
);
1502 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1503 struct neighbour
*neigh
;
1505 if (tbl
->family
!= ndm
->ndm_family
)
1507 read_unlock(&neigh_tbl_lock
);
1509 if (nla_len(dst_attr
) < tbl
->key_len
)
1512 if (ndm
->ndm_flags
& NTF_PROXY
) {
1513 err
= pneigh_delete(tbl
, net
, nla_data(dst_attr
), dev
);
1520 neigh
= neigh_lookup(tbl
, nla_data(dst_attr
), dev
);
1521 if (neigh
== NULL
) {
1526 err
= neigh_update(neigh
, NULL
, NUD_FAILED
,
1527 NEIGH_UPDATE_F_OVERRIDE
|
1528 NEIGH_UPDATE_F_ADMIN
);
1529 neigh_release(neigh
);
1532 read_unlock(&neigh_tbl_lock
);
1533 err
= -EAFNOSUPPORT
;
1542 static int neigh_add(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1544 struct net
*net
= skb
->sk
->sk_net
;
1546 struct nlattr
*tb
[NDA_MAX
+1];
1547 struct neigh_table
*tbl
;
1548 struct net_device
*dev
= NULL
;
1551 err
= nlmsg_parse(nlh
, sizeof(*ndm
), tb
, NDA_MAX
, NULL
);
1556 if (tb
[NDA_DST
] == NULL
)
1559 ndm
= nlmsg_data(nlh
);
1560 if (ndm
->ndm_ifindex
) {
1561 dev
= dev_get_by_index(net
, ndm
->ndm_ifindex
);
1567 if (tb
[NDA_LLADDR
] && nla_len(tb
[NDA_LLADDR
]) < dev
->addr_len
)
1571 read_lock(&neigh_tbl_lock
);
1572 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1573 int flags
= NEIGH_UPDATE_F_ADMIN
| NEIGH_UPDATE_F_OVERRIDE
;
1574 struct neighbour
*neigh
;
1577 if (tbl
->family
!= ndm
->ndm_family
)
1579 read_unlock(&neigh_tbl_lock
);
1581 if (nla_len(tb
[NDA_DST
]) < tbl
->key_len
)
1583 dst
= nla_data(tb
[NDA_DST
]);
1584 lladdr
= tb
[NDA_LLADDR
] ? nla_data(tb
[NDA_LLADDR
]) : NULL
;
1586 if (ndm
->ndm_flags
& NTF_PROXY
) {
1587 struct pneigh_entry
*pn
;
1590 pn
= pneigh_lookup(tbl
, net
, dst
, dev
, 1);
1592 pn
->flags
= ndm
->ndm_flags
;
1601 neigh
= neigh_lookup(tbl
, dst
, dev
);
1602 if (neigh
== NULL
) {
1603 if (!(nlh
->nlmsg_flags
& NLM_F_CREATE
)) {
1608 neigh
= __neigh_lookup_errno(tbl
, dst
, dev
);
1609 if (IS_ERR(neigh
)) {
1610 err
= PTR_ERR(neigh
);
1614 if (nlh
->nlmsg_flags
& NLM_F_EXCL
) {
1616 neigh_release(neigh
);
1620 if (!(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
1621 flags
&= ~NEIGH_UPDATE_F_OVERRIDE
;
1624 err
= neigh_update(neigh
, lladdr
, ndm
->ndm_state
, flags
);
1625 neigh_release(neigh
);
1629 read_unlock(&neigh_tbl_lock
);
1630 err
= -EAFNOSUPPORT
;
1639 static int neightbl_fill_parms(struct sk_buff
*skb
, struct neigh_parms
*parms
)
1641 struct nlattr
*nest
;
1643 nest
= nla_nest_start(skb
, NDTA_PARMS
);
1648 NLA_PUT_U32(skb
, NDTPA_IFINDEX
, parms
->dev
->ifindex
);
1650 NLA_PUT_U32(skb
, NDTPA_REFCNT
, atomic_read(&parms
->refcnt
));
1651 NLA_PUT_U32(skb
, NDTPA_QUEUE_LEN
, parms
->queue_len
);
1652 NLA_PUT_U32(skb
, NDTPA_PROXY_QLEN
, parms
->proxy_qlen
);
1653 NLA_PUT_U32(skb
, NDTPA_APP_PROBES
, parms
->app_probes
);
1654 NLA_PUT_U32(skb
, NDTPA_UCAST_PROBES
, parms
->ucast_probes
);
1655 NLA_PUT_U32(skb
, NDTPA_MCAST_PROBES
, parms
->mcast_probes
);
1656 NLA_PUT_MSECS(skb
, NDTPA_REACHABLE_TIME
, parms
->reachable_time
);
1657 NLA_PUT_MSECS(skb
, NDTPA_BASE_REACHABLE_TIME
,
1658 parms
->base_reachable_time
);
1659 NLA_PUT_MSECS(skb
, NDTPA_GC_STALETIME
, parms
->gc_staletime
);
1660 NLA_PUT_MSECS(skb
, NDTPA_DELAY_PROBE_TIME
, parms
->delay_probe_time
);
1661 NLA_PUT_MSECS(skb
, NDTPA_RETRANS_TIME
, parms
->retrans_time
);
1662 NLA_PUT_MSECS(skb
, NDTPA_ANYCAST_DELAY
, parms
->anycast_delay
);
1663 NLA_PUT_MSECS(skb
, NDTPA_PROXY_DELAY
, parms
->proxy_delay
);
1664 NLA_PUT_MSECS(skb
, NDTPA_LOCKTIME
, parms
->locktime
);
1666 return nla_nest_end(skb
, nest
);
1669 return nla_nest_cancel(skb
, nest
);
1672 static int neightbl_fill_info(struct sk_buff
*skb
, struct neigh_table
*tbl
,
1673 u32 pid
, u32 seq
, int type
, int flags
)
1675 struct nlmsghdr
*nlh
;
1676 struct ndtmsg
*ndtmsg
;
1678 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1682 ndtmsg
= nlmsg_data(nlh
);
1684 read_lock_bh(&tbl
->lock
);
1685 ndtmsg
->ndtm_family
= tbl
->family
;
1686 ndtmsg
->ndtm_pad1
= 0;
1687 ndtmsg
->ndtm_pad2
= 0;
1689 NLA_PUT_STRING(skb
, NDTA_NAME
, tbl
->id
);
1690 NLA_PUT_MSECS(skb
, NDTA_GC_INTERVAL
, tbl
->gc_interval
);
1691 NLA_PUT_U32(skb
, NDTA_THRESH1
, tbl
->gc_thresh1
);
1692 NLA_PUT_U32(skb
, NDTA_THRESH2
, tbl
->gc_thresh2
);
1693 NLA_PUT_U32(skb
, NDTA_THRESH3
, tbl
->gc_thresh3
);
1696 unsigned long now
= jiffies
;
1697 unsigned int flush_delta
= now
- tbl
->last_flush
;
1698 unsigned int rand_delta
= now
- tbl
->last_rand
;
1700 struct ndt_config ndc
= {
1701 .ndtc_key_len
= tbl
->key_len
,
1702 .ndtc_entry_size
= tbl
->entry_size
,
1703 .ndtc_entries
= atomic_read(&tbl
->entries
),
1704 .ndtc_last_flush
= jiffies_to_msecs(flush_delta
),
1705 .ndtc_last_rand
= jiffies_to_msecs(rand_delta
),
1706 .ndtc_hash_rnd
= tbl
->hash_rnd
,
1707 .ndtc_hash_mask
= tbl
->hash_mask
,
1708 .ndtc_hash_chain_gc
= tbl
->hash_chain_gc
,
1709 .ndtc_proxy_qlen
= tbl
->proxy_queue
.qlen
,
1712 NLA_PUT(skb
, NDTA_CONFIG
, sizeof(ndc
), &ndc
);
1717 struct ndt_stats ndst
;
1719 memset(&ndst
, 0, sizeof(ndst
));
1721 for_each_possible_cpu(cpu
) {
1722 struct neigh_statistics
*st
;
1724 st
= per_cpu_ptr(tbl
->stats
, cpu
);
1725 ndst
.ndts_allocs
+= st
->allocs
;
1726 ndst
.ndts_destroys
+= st
->destroys
;
1727 ndst
.ndts_hash_grows
+= st
->hash_grows
;
1728 ndst
.ndts_res_failed
+= st
->res_failed
;
1729 ndst
.ndts_lookups
+= st
->lookups
;
1730 ndst
.ndts_hits
+= st
->hits
;
1731 ndst
.ndts_rcv_probes_mcast
+= st
->rcv_probes_mcast
;
1732 ndst
.ndts_rcv_probes_ucast
+= st
->rcv_probes_ucast
;
1733 ndst
.ndts_periodic_gc_runs
+= st
->periodic_gc_runs
;
1734 ndst
.ndts_forced_gc_runs
+= st
->forced_gc_runs
;
1737 NLA_PUT(skb
, NDTA_STATS
, sizeof(ndst
), &ndst
);
1740 BUG_ON(tbl
->parms
.dev
);
1741 if (neightbl_fill_parms(skb
, &tbl
->parms
) < 0)
1742 goto nla_put_failure
;
1744 read_unlock_bh(&tbl
->lock
);
1745 return nlmsg_end(skb
, nlh
);
1748 read_unlock_bh(&tbl
->lock
);
1749 nlmsg_cancel(skb
, nlh
);
1753 static int neightbl_fill_param_info(struct sk_buff
*skb
,
1754 struct neigh_table
*tbl
,
1755 struct neigh_parms
*parms
,
1756 u32 pid
, u32 seq
, int type
,
1759 struct ndtmsg
*ndtmsg
;
1760 struct nlmsghdr
*nlh
;
1762 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1766 ndtmsg
= nlmsg_data(nlh
);
1768 read_lock_bh(&tbl
->lock
);
1769 ndtmsg
->ndtm_family
= tbl
->family
;
1770 ndtmsg
->ndtm_pad1
= 0;
1771 ndtmsg
->ndtm_pad2
= 0;
1773 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) < 0 ||
1774 neightbl_fill_parms(skb
, parms
) < 0)
1777 read_unlock_bh(&tbl
->lock
);
1778 return nlmsg_end(skb
, nlh
);
1780 read_unlock_bh(&tbl
->lock
);
1781 nlmsg_cancel(skb
, nlh
);
1785 static const struct nla_policy nl_neightbl_policy
[NDTA_MAX
+1] = {
1786 [NDTA_NAME
] = { .type
= NLA_STRING
},
1787 [NDTA_THRESH1
] = { .type
= NLA_U32
},
1788 [NDTA_THRESH2
] = { .type
= NLA_U32
},
1789 [NDTA_THRESH3
] = { .type
= NLA_U32
},
1790 [NDTA_GC_INTERVAL
] = { .type
= NLA_U64
},
1791 [NDTA_PARMS
] = { .type
= NLA_NESTED
},
1794 static const struct nla_policy nl_ntbl_parm_policy
[NDTPA_MAX
+1] = {
1795 [NDTPA_IFINDEX
] = { .type
= NLA_U32
},
1796 [NDTPA_QUEUE_LEN
] = { .type
= NLA_U32
},
1797 [NDTPA_PROXY_QLEN
] = { .type
= NLA_U32
},
1798 [NDTPA_APP_PROBES
] = { .type
= NLA_U32
},
1799 [NDTPA_UCAST_PROBES
] = { .type
= NLA_U32
},
1800 [NDTPA_MCAST_PROBES
] = { .type
= NLA_U32
},
1801 [NDTPA_BASE_REACHABLE_TIME
] = { .type
= NLA_U64
},
1802 [NDTPA_GC_STALETIME
] = { .type
= NLA_U64
},
1803 [NDTPA_DELAY_PROBE_TIME
] = { .type
= NLA_U64
},
1804 [NDTPA_RETRANS_TIME
] = { .type
= NLA_U64
},
1805 [NDTPA_ANYCAST_DELAY
] = { .type
= NLA_U64
},
1806 [NDTPA_PROXY_DELAY
] = { .type
= NLA_U64
},
1807 [NDTPA_LOCKTIME
] = { .type
= NLA_U64
},
1810 static int neightbl_set(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1812 struct net
*net
= skb
->sk
->sk_net
;
1813 struct neigh_table
*tbl
;
1814 struct ndtmsg
*ndtmsg
;
1815 struct nlattr
*tb
[NDTA_MAX
+1];
1818 err
= nlmsg_parse(nlh
, sizeof(*ndtmsg
), tb
, NDTA_MAX
,
1819 nl_neightbl_policy
);
1823 if (tb
[NDTA_NAME
] == NULL
) {
1828 ndtmsg
= nlmsg_data(nlh
);
1829 read_lock(&neigh_tbl_lock
);
1830 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1831 if (ndtmsg
->ndtm_family
&& tbl
->family
!= ndtmsg
->ndtm_family
)
1834 if (nla_strcmp(tb
[NDTA_NAME
], tbl
->id
) == 0)
1844 * We acquire tbl->lock to be nice to the periodic timers and
1845 * make sure they always see a consistent set of values.
1847 write_lock_bh(&tbl
->lock
);
1849 if (tb
[NDTA_PARMS
]) {
1850 struct nlattr
*tbp
[NDTPA_MAX
+1];
1851 struct neigh_parms
*p
;
1854 err
= nla_parse_nested(tbp
, NDTPA_MAX
, tb
[NDTA_PARMS
],
1855 nl_ntbl_parm_policy
);
1857 goto errout_tbl_lock
;
1859 if (tbp
[NDTPA_IFINDEX
])
1860 ifindex
= nla_get_u32(tbp
[NDTPA_IFINDEX
]);
1862 p
= lookup_neigh_params(tbl
, net
, ifindex
);
1865 goto errout_tbl_lock
;
1868 for (i
= 1; i
<= NDTPA_MAX
; i
++) {
1873 case NDTPA_QUEUE_LEN
:
1874 p
->queue_len
= nla_get_u32(tbp
[i
]);
1876 case NDTPA_PROXY_QLEN
:
1877 p
->proxy_qlen
= nla_get_u32(tbp
[i
]);
1879 case NDTPA_APP_PROBES
:
1880 p
->app_probes
= nla_get_u32(tbp
[i
]);
1882 case NDTPA_UCAST_PROBES
:
1883 p
->ucast_probes
= nla_get_u32(tbp
[i
]);
1885 case NDTPA_MCAST_PROBES
:
1886 p
->mcast_probes
= nla_get_u32(tbp
[i
]);
1888 case NDTPA_BASE_REACHABLE_TIME
:
1889 p
->base_reachable_time
= nla_get_msecs(tbp
[i
]);
1891 case NDTPA_GC_STALETIME
:
1892 p
->gc_staletime
= nla_get_msecs(tbp
[i
]);
1894 case NDTPA_DELAY_PROBE_TIME
:
1895 p
->delay_probe_time
= nla_get_msecs(tbp
[i
]);
1897 case NDTPA_RETRANS_TIME
:
1898 p
->retrans_time
= nla_get_msecs(tbp
[i
]);
1900 case NDTPA_ANYCAST_DELAY
:
1901 p
->anycast_delay
= nla_get_msecs(tbp
[i
]);
1903 case NDTPA_PROXY_DELAY
:
1904 p
->proxy_delay
= nla_get_msecs(tbp
[i
]);
1906 case NDTPA_LOCKTIME
:
1907 p
->locktime
= nla_get_msecs(tbp
[i
]);
1913 if (tb
[NDTA_THRESH1
])
1914 tbl
->gc_thresh1
= nla_get_u32(tb
[NDTA_THRESH1
]);
1916 if (tb
[NDTA_THRESH2
])
1917 tbl
->gc_thresh2
= nla_get_u32(tb
[NDTA_THRESH2
]);
1919 if (tb
[NDTA_THRESH3
])
1920 tbl
->gc_thresh3
= nla_get_u32(tb
[NDTA_THRESH3
]);
1922 if (tb
[NDTA_GC_INTERVAL
])
1923 tbl
->gc_interval
= nla_get_msecs(tb
[NDTA_GC_INTERVAL
]);
1928 write_unlock_bh(&tbl
->lock
);
1930 read_unlock(&neigh_tbl_lock
);
1935 static int neightbl_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1937 struct net
*net
= skb
->sk
->sk_net
;
1938 int family
, tidx
, nidx
= 0;
1939 int tbl_skip
= cb
->args
[0];
1940 int neigh_skip
= cb
->args
[1];
1941 struct neigh_table
*tbl
;
1943 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
1945 read_lock(&neigh_tbl_lock
);
1946 for (tbl
= neigh_tables
, tidx
= 0; tbl
; tbl
= tbl
->next
, tidx
++) {
1947 struct neigh_parms
*p
;
1949 if (tidx
< tbl_skip
|| (family
&& tbl
->family
!= family
))
1952 if (neightbl_fill_info(skb
, tbl
, NETLINK_CB(cb
->skb
).pid
,
1953 cb
->nlh
->nlmsg_seq
, RTM_NEWNEIGHTBL
,
1957 for (nidx
= 0, p
= tbl
->parms
.next
; p
; p
= p
->next
) {
1961 if (nidx
++ < neigh_skip
)
1964 if (neightbl_fill_param_info(skb
, tbl
, p
,
1965 NETLINK_CB(cb
->skb
).pid
,
1975 read_unlock(&neigh_tbl_lock
);
1982 static int neigh_fill_info(struct sk_buff
*skb
, struct neighbour
*neigh
,
1983 u32 pid
, u32 seq
, int type
, unsigned int flags
)
1985 unsigned long now
= jiffies
;
1986 struct nda_cacheinfo ci
;
1987 struct nlmsghdr
*nlh
;
1990 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
1994 ndm
= nlmsg_data(nlh
);
1995 ndm
->ndm_family
= neigh
->ops
->family
;
1998 ndm
->ndm_flags
= neigh
->flags
;
1999 ndm
->ndm_type
= neigh
->type
;
2000 ndm
->ndm_ifindex
= neigh
->dev
->ifindex
;
2002 NLA_PUT(skb
, NDA_DST
, neigh
->tbl
->key_len
, neigh
->primary_key
);
2004 read_lock_bh(&neigh
->lock
);
2005 ndm
->ndm_state
= neigh
->nud_state
;
2006 if ((neigh
->nud_state
& NUD_VALID
) &&
2007 nla_put(skb
, NDA_LLADDR
, neigh
->dev
->addr_len
, neigh
->ha
) < 0) {
2008 read_unlock_bh(&neigh
->lock
);
2009 goto nla_put_failure
;
2012 ci
.ndm_used
= now
- neigh
->used
;
2013 ci
.ndm_confirmed
= now
- neigh
->confirmed
;
2014 ci
.ndm_updated
= now
- neigh
->updated
;
2015 ci
.ndm_refcnt
= atomic_read(&neigh
->refcnt
) - 1;
2016 read_unlock_bh(&neigh
->lock
);
2018 NLA_PUT_U32(skb
, NDA_PROBES
, atomic_read(&neigh
->probes
));
2019 NLA_PUT(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
);
2021 return nlmsg_end(skb
, nlh
);
2024 nlmsg_cancel(skb
, nlh
);
2028 static void neigh_update_notify(struct neighbour
*neigh
)
2030 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, neigh
);
2031 __neigh_notify(neigh
, RTM_NEWNEIGH
, 0);
2034 static int neigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
,
2035 struct netlink_callback
*cb
)
2037 struct net
* net
= skb
->sk
->sk_net
;
2038 struct neighbour
*n
;
2039 int rc
, h
, s_h
= cb
->args
[1];
2040 int idx
, s_idx
= idx
= cb
->args
[2];
2042 read_lock_bh(&tbl
->lock
);
2043 for (h
= 0; h
<= tbl
->hash_mask
; h
++) {
2048 for (n
= tbl
->hash_buckets
[h
], idx
= 0; n
; n
= n
->next
) {
2050 if (n
->dev
->nd_net
!= net
)
2055 if (neigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).pid
,
2058 NLM_F_MULTI
) <= 0) {
2059 read_unlock_bh(&tbl
->lock
);
2065 read_unlock_bh(&tbl
->lock
);
2073 static int neigh_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2075 struct neigh_table
*tbl
;
2078 read_lock(&neigh_tbl_lock
);
2079 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
2082 for (tbl
= neigh_tables
, t
= 0; tbl
; tbl
= tbl
->next
, t
++) {
2083 if (t
< s_t
|| (family
&& tbl
->family
!= family
))
2086 memset(&cb
->args
[1], 0, sizeof(cb
->args
) -
2087 sizeof(cb
->args
[0]));
2088 if (neigh_dump_table(tbl
, skb
, cb
) < 0)
2091 read_unlock(&neigh_tbl_lock
);
2097 void neigh_for_each(struct neigh_table
*tbl
, void (*cb
)(struct neighbour
*, void *), void *cookie
)
2101 read_lock_bh(&tbl
->lock
);
2102 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2103 struct neighbour
*n
;
2105 for (n
= tbl
->hash_buckets
[chain
]; n
; n
= n
->next
)
2108 read_unlock_bh(&tbl
->lock
);
2110 EXPORT_SYMBOL(neigh_for_each
);
2112 /* The tbl->lock must be held as a writer and BH disabled. */
2113 void __neigh_for_each_release(struct neigh_table
*tbl
,
2114 int (*cb
)(struct neighbour
*))
2118 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2119 struct neighbour
*n
, **np
;
2121 np
= &tbl
->hash_buckets
[chain
];
2122 while ((n
= *np
) != NULL
) {
2125 write_lock(&n
->lock
);
2132 write_unlock(&n
->lock
);
2134 neigh_cleanup_and_release(n
);
2138 EXPORT_SYMBOL(__neigh_for_each_release
);
2140 #ifdef CONFIG_PROC_FS
2142 static struct neighbour
*neigh_get_first(struct seq_file
*seq
)
2144 struct neigh_seq_state
*state
= seq
->private;
2145 struct net
*net
= state
->p
.net
;
2146 struct neigh_table
*tbl
= state
->tbl
;
2147 struct neighbour
*n
= NULL
;
2148 int bucket
= state
->bucket
;
2150 state
->flags
&= ~NEIGH_SEQ_IS_PNEIGH
;
2151 for (bucket
= 0; bucket
<= tbl
->hash_mask
; bucket
++) {
2152 n
= tbl
->hash_buckets
[bucket
];
2155 if (n
->dev
->nd_net
!= net
)
2157 if (state
->neigh_sub_iter
) {
2161 v
= state
->neigh_sub_iter(state
, n
, &fakep
);
2165 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2167 if (n
->nud_state
& ~NUD_NOARP
)
2176 state
->bucket
= bucket
;
2181 static struct neighbour
*neigh_get_next(struct seq_file
*seq
,
2182 struct neighbour
*n
,
2185 struct neigh_seq_state
*state
= seq
->private;
2186 struct net
*net
= state
->p
.net
;
2187 struct neigh_table
*tbl
= state
->tbl
;
2189 if (state
->neigh_sub_iter
) {
2190 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2198 if (n
->dev
->nd_net
!= net
)
2200 if (state
->neigh_sub_iter
) {
2201 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2206 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2209 if (n
->nud_state
& ~NUD_NOARP
)
2218 if (++state
->bucket
> tbl
->hash_mask
)
2221 n
= tbl
->hash_buckets
[state
->bucket
];
2229 static struct neighbour
*neigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2231 struct neighbour
*n
= neigh_get_first(seq
);
2235 n
= neigh_get_next(seq
, n
, pos
);
2240 return *pos
? NULL
: n
;
2243 static struct pneigh_entry
*pneigh_get_first(struct seq_file
*seq
)
2245 struct neigh_seq_state
*state
= seq
->private;
2246 struct net
* net
= state
->p
.net
;
2247 struct neigh_table
*tbl
= state
->tbl
;
2248 struct pneigh_entry
*pn
= NULL
;
2249 int bucket
= state
->bucket
;
2251 state
->flags
|= NEIGH_SEQ_IS_PNEIGH
;
2252 for (bucket
= 0; bucket
<= PNEIGH_HASHMASK
; bucket
++) {
2253 pn
= tbl
->phash_buckets
[bucket
];
2254 while (pn
&& (pn
->net
!= net
))
2259 state
->bucket
= bucket
;
2264 static struct pneigh_entry
*pneigh_get_next(struct seq_file
*seq
,
2265 struct pneigh_entry
*pn
,
2268 struct neigh_seq_state
*state
= seq
->private;
2269 struct net
* net
= state
->p
.net
;
2270 struct neigh_table
*tbl
= state
->tbl
;
2274 if (++state
->bucket
> PNEIGH_HASHMASK
)
2276 pn
= tbl
->phash_buckets
[state
->bucket
];
2277 while (pn
&& (pn
->net
!= net
))
2289 static struct pneigh_entry
*pneigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2291 struct pneigh_entry
*pn
= pneigh_get_first(seq
);
2295 pn
= pneigh_get_next(seq
, pn
, pos
);
2300 return *pos
? NULL
: pn
;
2303 static void *neigh_get_idx_any(struct seq_file
*seq
, loff_t
*pos
)
2305 struct neigh_seq_state
*state
= seq
->private;
2308 rc
= neigh_get_idx(seq
, pos
);
2309 if (!rc
&& !(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2310 rc
= pneigh_get_idx(seq
, pos
);
2315 void *neigh_seq_start(struct seq_file
*seq
, loff_t
*pos
, struct neigh_table
*tbl
, unsigned int neigh_seq_flags
)
2316 __acquires(tbl
->lock
)
2318 struct neigh_seq_state
*state
= seq
->private;
2319 loff_t pos_minus_one
;
2323 state
->flags
= (neigh_seq_flags
& ~NEIGH_SEQ_IS_PNEIGH
);
2325 read_lock_bh(&tbl
->lock
);
2327 pos_minus_one
= *pos
- 1;
2328 return *pos
? neigh_get_idx_any(seq
, &pos_minus_one
) : SEQ_START_TOKEN
;
2330 EXPORT_SYMBOL(neigh_seq_start
);
2332 void *neigh_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2334 struct neigh_seq_state
*state
;
2337 if (v
== SEQ_START_TOKEN
) {
2338 rc
= neigh_get_idx(seq
, pos
);
2342 state
= seq
->private;
2343 if (!(state
->flags
& NEIGH_SEQ_IS_PNEIGH
)) {
2344 rc
= neigh_get_next(seq
, v
, NULL
);
2347 if (!(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2348 rc
= pneigh_get_first(seq
);
2350 BUG_ON(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
);
2351 rc
= pneigh_get_next(seq
, v
, NULL
);
2357 EXPORT_SYMBOL(neigh_seq_next
);
2359 void neigh_seq_stop(struct seq_file
*seq
, void *v
)
2360 __releases(tbl
->lock
)
2362 struct neigh_seq_state
*state
= seq
->private;
2363 struct neigh_table
*tbl
= state
->tbl
;
2365 read_unlock_bh(&tbl
->lock
);
2367 EXPORT_SYMBOL(neigh_seq_stop
);
2369 /* statistics via seq_file */
2371 static void *neigh_stat_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2373 struct proc_dir_entry
*pde
= seq
->private;
2374 struct neigh_table
*tbl
= pde
->data
;
2378 return SEQ_START_TOKEN
;
2380 for (cpu
= *pos
-1; cpu
< NR_CPUS
; ++cpu
) {
2381 if (!cpu_possible(cpu
))
2384 return per_cpu_ptr(tbl
->stats
, cpu
);
2389 static void *neigh_stat_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2391 struct proc_dir_entry
*pde
= seq
->private;
2392 struct neigh_table
*tbl
= pde
->data
;
2395 for (cpu
= *pos
; cpu
< NR_CPUS
; ++cpu
) {
2396 if (!cpu_possible(cpu
))
2399 return per_cpu_ptr(tbl
->stats
, cpu
);
2404 static void neigh_stat_seq_stop(struct seq_file
*seq
, void *v
)
2409 static int neigh_stat_seq_show(struct seq_file
*seq
, void *v
)
2411 struct proc_dir_entry
*pde
= seq
->private;
2412 struct neigh_table
*tbl
= pde
->data
;
2413 struct neigh_statistics
*st
= v
;
2415 if (v
== SEQ_START_TOKEN
) {
2416 seq_printf(seq
, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2420 seq_printf(seq
, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2421 "%08lx %08lx %08lx %08lx\n",
2422 atomic_read(&tbl
->entries
),
2433 st
->rcv_probes_mcast
,
2434 st
->rcv_probes_ucast
,
2436 st
->periodic_gc_runs
,
2443 static const struct seq_operations neigh_stat_seq_ops
= {
2444 .start
= neigh_stat_seq_start
,
2445 .next
= neigh_stat_seq_next
,
2446 .stop
= neigh_stat_seq_stop
,
2447 .show
= neigh_stat_seq_show
,
2450 static int neigh_stat_seq_open(struct inode
*inode
, struct file
*file
)
2452 int ret
= seq_open(file
, &neigh_stat_seq_ops
);
2455 struct seq_file
*sf
= file
->private_data
;
2456 sf
->private = PDE(inode
);
2461 static const struct file_operations neigh_stat_seq_fops
= {
2462 .owner
= THIS_MODULE
,
2463 .open
= neigh_stat_seq_open
,
2465 .llseek
= seq_lseek
,
2466 .release
= seq_release
,
2469 #endif /* CONFIG_PROC_FS */
2471 static inline size_t neigh_nlmsg_size(void)
2473 return NLMSG_ALIGN(sizeof(struct ndmsg
))
2474 + nla_total_size(MAX_ADDR_LEN
) /* NDA_DST */
2475 + nla_total_size(MAX_ADDR_LEN
) /* NDA_LLADDR */
2476 + nla_total_size(sizeof(struct nda_cacheinfo
))
2477 + nla_total_size(4); /* NDA_PROBES */
2480 static void __neigh_notify(struct neighbour
*n
, int type
, int flags
)
2482 struct net
*net
= n
->dev
->nd_net
;
2483 struct sk_buff
*skb
;
2486 skb
= nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC
);
2490 err
= neigh_fill_info(skb
, n
, 0, 0, type
, flags
);
2492 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2493 WARN_ON(err
== -EMSGSIZE
);
2497 err
= rtnl_notify(skb
, net
, 0, RTNLGRP_NEIGH
, NULL
, GFP_ATOMIC
);
2500 rtnl_set_sk_err(net
, RTNLGRP_NEIGH
, err
);
2504 void neigh_app_ns(struct neighbour
*n
)
2506 __neigh_notify(n
, RTM_GETNEIGH
, NLM_F_REQUEST
);
2508 #endif /* CONFIG_ARPD */
2510 #ifdef CONFIG_SYSCTL
2512 static struct neigh_sysctl_table
{
2513 struct ctl_table_header
*sysctl_header
;
2514 struct ctl_table neigh_vars
[__NET_NEIGH_MAX
];
2516 } neigh_sysctl_template __read_mostly
= {
2519 .ctl_name
= NET_NEIGH_MCAST_SOLICIT
,
2520 .procname
= "mcast_solicit",
2521 .maxlen
= sizeof(int),
2523 .proc_handler
= &proc_dointvec
,
2526 .ctl_name
= NET_NEIGH_UCAST_SOLICIT
,
2527 .procname
= "ucast_solicit",
2528 .maxlen
= sizeof(int),
2530 .proc_handler
= &proc_dointvec
,
2533 .ctl_name
= NET_NEIGH_APP_SOLICIT
,
2534 .procname
= "app_solicit",
2535 .maxlen
= sizeof(int),
2537 .proc_handler
= &proc_dointvec
,
2540 .procname
= "retrans_time",
2541 .maxlen
= sizeof(int),
2543 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2546 .ctl_name
= NET_NEIGH_REACHABLE_TIME
,
2547 .procname
= "base_reachable_time",
2548 .maxlen
= sizeof(int),
2550 .proc_handler
= &proc_dointvec_jiffies
,
2551 .strategy
= &sysctl_jiffies
,
2554 .ctl_name
= NET_NEIGH_DELAY_PROBE_TIME
,
2555 .procname
= "delay_first_probe_time",
2556 .maxlen
= sizeof(int),
2558 .proc_handler
= &proc_dointvec_jiffies
,
2559 .strategy
= &sysctl_jiffies
,
2562 .ctl_name
= NET_NEIGH_GC_STALE_TIME
,
2563 .procname
= "gc_stale_time",
2564 .maxlen
= sizeof(int),
2566 .proc_handler
= &proc_dointvec_jiffies
,
2567 .strategy
= &sysctl_jiffies
,
2570 .ctl_name
= NET_NEIGH_UNRES_QLEN
,
2571 .procname
= "unres_qlen",
2572 .maxlen
= sizeof(int),
2574 .proc_handler
= &proc_dointvec
,
2577 .ctl_name
= NET_NEIGH_PROXY_QLEN
,
2578 .procname
= "proxy_qlen",
2579 .maxlen
= sizeof(int),
2581 .proc_handler
= &proc_dointvec
,
2584 .procname
= "anycast_delay",
2585 .maxlen
= sizeof(int),
2587 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2590 .procname
= "proxy_delay",
2591 .maxlen
= sizeof(int),
2593 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2596 .procname
= "locktime",
2597 .maxlen
= sizeof(int),
2599 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2602 .ctl_name
= NET_NEIGH_RETRANS_TIME_MS
,
2603 .procname
= "retrans_time_ms",
2604 .maxlen
= sizeof(int),
2606 .proc_handler
= &proc_dointvec_ms_jiffies
,
2607 .strategy
= &sysctl_ms_jiffies
,
2610 .ctl_name
= NET_NEIGH_REACHABLE_TIME_MS
,
2611 .procname
= "base_reachable_time_ms",
2612 .maxlen
= sizeof(int),
2614 .proc_handler
= &proc_dointvec_ms_jiffies
,
2615 .strategy
= &sysctl_ms_jiffies
,
2618 .ctl_name
= NET_NEIGH_GC_INTERVAL
,
2619 .procname
= "gc_interval",
2620 .maxlen
= sizeof(int),
2622 .proc_handler
= &proc_dointvec_jiffies
,
2623 .strategy
= &sysctl_jiffies
,
2626 .ctl_name
= NET_NEIGH_GC_THRESH1
,
2627 .procname
= "gc_thresh1",
2628 .maxlen
= sizeof(int),
2630 .proc_handler
= &proc_dointvec
,
2633 .ctl_name
= NET_NEIGH_GC_THRESH2
,
2634 .procname
= "gc_thresh2",
2635 .maxlen
= sizeof(int),
2637 .proc_handler
= &proc_dointvec
,
2640 .ctl_name
= NET_NEIGH_GC_THRESH3
,
2641 .procname
= "gc_thresh3",
2642 .maxlen
= sizeof(int),
2644 .proc_handler
= &proc_dointvec
,
2650 int neigh_sysctl_register(struct net_device
*dev
, struct neigh_parms
*p
,
2651 int p_id
, int pdev_id
, char *p_name
,
2652 proc_handler
*handler
, ctl_handler
*strategy
)
2654 struct neigh_sysctl_table
*t
;
2655 const char *dev_name_source
= NULL
;
2657 #define NEIGH_CTL_PATH_ROOT 0
2658 #define NEIGH_CTL_PATH_PROTO 1
2659 #define NEIGH_CTL_PATH_NEIGH 2
2660 #define NEIGH_CTL_PATH_DEV 3
2662 struct ctl_path neigh_path
[] = {
2663 { .procname
= "net", .ctl_name
= CTL_NET
, },
2664 { .procname
= "proto", .ctl_name
= 0, },
2665 { .procname
= "neigh", .ctl_name
= 0, },
2666 { .procname
= "default", .ctl_name
= NET_PROTO_CONF_DEFAULT
, },
2670 t
= kmemdup(&neigh_sysctl_template
, sizeof(*t
), GFP_KERNEL
);
2674 t
->neigh_vars
[0].data
= &p
->mcast_probes
;
2675 t
->neigh_vars
[1].data
= &p
->ucast_probes
;
2676 t
->neigh_vars
[2].data
= &p
->app_probes
;
2677 t
->neigh_vars
[3].data
= &p
->retrans_time
;
2678 t
->neigh_vars
[4].data
= &p
->base_reachable_time
;
2679 t
->neigh_vars
[5].data
= &p
->delay_probe_time
;
2680 t
->neigh_vars
[6].data
= &p
->gc_staletime
;
2681 t
->neigh_vars
[7].data
= &p
->queue_len
;
2682 t
->neigh_vars
[8].data
= &p
->proxy_qlen
;
2683 t
->neigh_vars
[9].data
= &p
->anycast_delay
;
2684 t
->neigh_vars
[10].data
= &p
->proxy_delay
;
2685 t
->neigh_vars
[11].data
= &p
->locktime
;
2686 t
->neigh_vars
[12].data
= &p
->retrans_time
;
2687 t
->neigh_vars
[13].data
= &p
->base_reachable_time
;
2690 dev_name_source
= dev
->name
;
2691 neigh_path
[NEIGH_CTL_PATH_DEV
].ctl_name
= dev
->ifindex
;
2692 /* Terminate the table early */
2693 memset(&t
->neigh_vars
[14], 0, sizeof(t
->neigh_vars
[14]));
2695 dev_name_source
= neigh_path
[NEIGH_CTL_PATH_DEV
].procname
;
2696 t
->neigh_vars
[14].data
= (int *)(p
+ 1);
2697 t
->neigh_vars
[15].data
= (int *)(p
+ 1) + 1;
2698 t
->neigh_vars
[16].data
= (int *)(p
+ 1) + 2;
2699 t
->neigh_vars
[17].data
= (int *)(p
+ 1) + 3;
2703 if (handler
|| strategy
) {
2705 t
->neigh_vars
[3].proc_handler
= handler
;
2706 t
->neigh_vars
[3].strategy
= strategy
;
2707 t
->neigh_vars
[3].extra1
= dev
;
2709 t
->neigh_vars
[3].ctl_name
= CTL_UNNUMBERED
;
2711 t
->neigh_vars
[4].proc_handler
= handler
;
2712 t
->neigh_vars
[4].strategy
= strategy
;
2713 t
->neigh_vars
[4].extra1
= dev
;
2715 t
->neigh_vars
[4].ctl_name
= CTL_UNNUMBERED
;
2716 /* RetransTime (in milliseconds)*/
2717 t
->neigh_vars
[12].proc_handler
= handler
;
2718 t
->neigh_vars
[12].strategy
= strategy
;
2719 t
->neigh_vars
[12].extra1
= dev
;
2721 t
->neigh_vars
[12].ctl_name
= CTL_UNNUMBERED
;
2722 /* ReachableTime (in milliseconds) */
2723 t
->neigh_vars
[13].proc_handler
= handler
;
2724 t
->neigh_vars
[13].strategy
= strategy
;
2725 t
->neigh_vars
[13].extra1
= dev
;
2727 t
->neigh_vars
[13].ctl_name
= CTL_UNNUMBERED
;
2730 t
->dev_name
= kstrdup(dev_name_source
, GFP_KERNEL
);
2734 neigh_path
[NEIGH_CTL_PATH_DEV
].procname
= t
->dev_name
;
2735 neigh_path
[NEIGH_CTL_PATH_NEIGH
].ctl_name
= pdev_id
;
2736 neigh_path
[NEIGH_CTL_PATH_PROTO
].procname
= p_name
;
2737 neigh_path
[NEIGH_CTL_PATH_PROTO
].ctl_name
= p_id
;
2739 t
->sysctl_header
= register_sysctl_paths(neigh_path
, t
->neigh_vars
);
2740 if (!t
->sysctl_header
)
2743 p
->sysctl_table
= t
;
2754 void neigh_sysctl_unregister(struct neigh_parms
*p
)
2756 if (p
->sysctl_table
) {
2757 struct neigh_sysctl_table
*t
= p
->sysctl_table
;
2758 p
->sysctl_table
= NULL
;
2759 unregister_sysctl_table(t
->sysctl_header
);
2765 #endif /* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif /* CONFIG_SYSCTL */