/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock is also used to protect other
   entry fields (e.g. the timer and the resolution queue).

   Again, nothing clever should be done under neigh->lock; the most
   complicated procedure we allow is dev->hard_header. It is assumed
   that dev->hard_header is simplistic and does not make callbacks
   into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
   list of neighbour tables. The list is used only in process context.
 */
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
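/*
 * Illustrative userspace sketch (not part of this file): it shows that the
 * expression above yields a value uniformly distributed in
 * [base/2, 3*base/2), i.e. ReachableTime is randomized around its base
 * value as RFC 2461 suggests. rand() stands in for net_random(); the
 * 30000 "base" is an arbitrary example in milliseconds.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long rand_reach_time(unsigned long base)
{
	return base ? (rand() % base) + (base >> 1) : 0;
}

int main(void)
{
	unsigned long base = 30000, min = ~0UL, max = 0;
	int i;

	for (i = 0; i < 100000; i++) {
		unsigned long v = rand_reach_time(base);

		if (v < min)
			min = v;
		if (v > max)
			max = v;
	}
	printf("observed range: [%lu, %lu]\n", min, max); /* ~[15000, 44999] */
	return 0;
}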
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				if (n->parms->neigh_cleanup)
					n->parms->neigh_cleanup(n);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			if (n->parms->neigh_cleanup)
				n->parms->neigh_cleanup(n);
			neigh_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
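/*
 * Standalone sketch of the admission check at the top of neigh_alloc()
 * (userspace, for illustration only; the field names mirror the table
 * fields above and forced_gc_freed stands in for the return value of
 * neigh_forced_gc()). A new entry is refused only when the table already
 * holds gc_thresh3 entries, or holds gc_thresh2 entries and has not been
 * flushed within the last 5 seconds, and a forced GC pass still leaves it
 * at or above gc_thresh3.
 */
#include <stdbool.h>

struct gc_limits {
	int gc_thresh2, gc_thresh3;
	unsigned long last_flush, now, hz;
};

static bool may_allocate(int entries, const struct gc_limits *l,
			 bool forced_gc_freed)
{
	bool pressure = entries >= l->gc_thresh3 ||
			(entries >= l->gc_thresh2 &&
			 l->now - l->last_flush > 5 * l->hz);

	if (!pressure)
		return true;
	/* under pressure: GC first; refuse only if that failed and we are
	 * still at or above the hard limit */
	return forced_gc_freed || entries < l->gc_thresh3;
}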
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
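/*
 * Tiny userspace sketch (not kernel code) of the rehash loop above: with
 * a power-of-two table, "hash & (size - 1)" selects a bucket without a
 * division, which is why is_power_of_2() is asserted before growing.
 * All names below are made up for illustration.
 */
#include <assert.h>

struct node { unsigned int hash; struct node *next; };

static void rehash(struct node **old_tbl, unsigned int old_size,
		   struct node **new_tbl, unsigned int new_size)
{
	unsigned int i;

	assert((new_size & (new_size - 1)) == 0);	/* power of two */

	for (i = 0; i < old_size; i++) {
		struct node *n = old_tbl[i], *next;

		for (; n; n = next) {
			unsigned int b = n->hash & (new_size - 1);

			next = n->next;		/* remember the old chain */
			n->next = new_tbl[b];	/* push onto the new bucket */
			new_tbl[b] = n;
		}
	}
}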
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
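/*
 * Sketch of the usual lookup-or-create pattern built on the two functions
 * above; it mirrors the __neigh_lookup() helper in <net/neighbour.h> and
 * is shown here only for illustration (error handling abbreviated).
 */
static struct neighbour *lookup_or_create(struct neigh_table *tbl,
					  const void *pkey,
					  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;			/* reference already held */

	n = neigh_create(tbl, pkey, dev);	/* returns ERR_PTR() on failure */
	return IS_ERR(n) ? NULL : n;
}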
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
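/*
 * Userspace sketch of the bucket selection used by pneigh_lookup() and
 * pneigh_delete(): the last four bytes of the key are XOR-folded down to
 * the 4-bit PNEIGH_HASHMASK range (0..15). The example key is made up;
 * memcpy() stands in for the unaligned pointer cast used above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_PNEIGH_HASHMASK 0xF

static unsigned int pneigh_bucket(const void *pkey, int key_len)
{
	uint32_t hash_val;

	memcpy(&hash_val, (const char *)pkey + key_len - 4, 4);
	hash_val ^= hash_val >> 16;
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	return hash_val & EXAMPLE_PNEIGH_HASHMASK;
}

int main(void)
{
	uint8_t ipv4_key[4] = { 192, 0, 2, 1 };	/* example address */

	printf("bucket = %u\n", pneigh_bucket(ipv4_key, sizeof(ipv4_key)));
	return 0;
}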
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
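/*
 * For context, a sketch of the release side of this contract (it mirrors
 * the neigh_release() inline in <net/neighbour.h>; shown here only for
 * illustration): neigh_destroy() runs once the entry is already dead and
 * the last reference goes away, which is why neigh_flush_dev() above can
 * mark entries dead while other CPUs still hold references to them.
 */
static inline void example_neigh_release(struct neighbour *neigh)
{
	if (atomic_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}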
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
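/*
 * Conceptual userspace sketch (not kernel code) of what neigh_connect()
 * and neigh_suspect() achieve: every sender just calls n->output(skb),
 * and the state machine repoints that hook between a verified fast path
 * and a re-resolving slow path, so callers never test the NUD state
 * themselves. The structure and function names below are made up for
 * illustration.
 */
struct toy_neigh {
	int (*output)(struct toy_neigh *n, void *skb);
	int (*slow_output)(struct toy_neigh *n, void *skb); /* ops->output */
	int (*fast_output)(struct toy_neigh *n, void *skb); /* ops->connected_output */
};

static void toy_connect(struct toy_neigh *n)
{
	n->output = n->fast_output;	/* lladdr verified: skip the checks */
}

static void toy_suspect(struct toy_neigh *n)
{
	n->output = n->slow_output;	/* lladdr stale: re-resolve on demand */
}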
635 static void neigh_periodic_timer(unsigned long arg
)
637 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
638 struct neighbour
*n
, **np
;
639 unsigned long expire
, now
= jiffies
;
641 NEIGH_CACHE_STAT_INC(tbl
, periodic_gc_runs
);
643 write_lock(&tbl
->lock
);
646 * periodically recompute ReachableTime from random function
649 if (time_after(now
, tbl
->last_rand
+ 300 * HZ
)) {
650 struct neigh_parms
*p
;
651 tbl
->last_rand
= now
;
652 for (p
= &tbl
->parms
; p
; p
= p
->next
)
654 neigh_rand_reach_time(p
->base_reachable_time
);
657 np
= &tbl
->hash_buckets
[tbl
->hash_chain_gc
];
658 tbl
->hash_chain_gc
= ((tbl
->hash_chain_gc
+ 1) & tbl
->hash_mask
);
660 while ((n
= *np
) != NULL
) {
663 write_lock(&n
->lock
);
665 state
= n
->nud_state
;
666 if (state
& (NUD_PERMANENT
| NUD_IN_TIMER
)) {
667 write_unlock(&n
->lock
);
671 if (time_before(n
->used
, n
->confirmed
))
672 n
->used
= n
->confirmed
;
674 if (atomic_read(&n
->refcnt
) == 1 &&
675 (state
== NUD_FAILED
||
676 time_after(now
, n
->used
+ n
->parms
->gc_staletime
))) {
679 write_unlock(&n
->lock
);
680 if (n
->parms
->neigh_cleanup
)
681 n
->parms
->neigh_cleanup(n
);
685 write_unlock(&n
->lock
);
691 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
692 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
693 * base_reachable_time.
695 expire
= tbl
->parms
.base_reachable_time
>> 1;
696 expire
/= (tbl
->hash_mask
+ 1);
701 mod_timer(&tbl
->gc_timer
, round_jiffies(now
+ expire
));
703 mod_timer(&tbl
->gc_timer
, now
+ expire
);
705 write_unlock(&tbl
->lock
);
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
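/*
 * Worked example of neigh_max_probes(), assuming the common IPv4 ARP
 * defaults ucast_probes = 3, mcast_probes = 3, app_probes = 0 (these are
 * per-table tunables, so treat the numbers as an assumption):
 *
 *   NUD_INCOMPLETE (first resolution):  3 + 0 + 3 = 6 solicitations
 *   NUD_PROBE (revalidating a lladdr):  3 unicast solicitations only
 *
 * Once atomic_read(&neigh->probes) reaches this limit, the timer handler
 * below moves the entry to NUD_FAILED.
 */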
725 /* Called when a timer expires for a neighbour entry. */
727 static void neigh_timer_handler(unsigned long arg
)
729 unsigned long now
, next
;
730 struct neighbour
*neigh
= (struct neighbour
*)arg
;
734 write_lock(&neigh
->lock
);
736 state
= neigh
->nud_state
;
740 if (!(state
& NUD_IN_TIMER
)) {
742 printk(KERN_WARNING
"neigh: timer & !nud_in_timer\n");
747 if (state
& NUD_REACHABLE
) {
748 if (time_before_eq(now
,
749 neigh
->confirmed
+ neigh
->parms
->reachable_time
)) {
750 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh
);
751 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
752 } else if (time_before_eq(now
,
753 neigh
->used
+ neigh
->parms
->delay_probe_time
)) {
754 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh
);
755 neigh
->nud_state
= NUD_DELAY
;
756 neigh
->updated
= jiffies
;
757 neigh_suspect(neigh
);
758 next
= now
+ neigh
->parms
->delay_probe_time
;
760 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh
);
761 neigh
->nud_state
= NUD_STALE
;
762 neigh
->updated
= jiffies
;
763 neigh_suspect(neigh
);
766 } else if (state
& NUD_DELAY
) {
767 if (time_before_eq(now
,
768 neigh
->confirmed
+ neigh
->parms
->delay_probe_time
)) {
769 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh
);
770 neigh
->nud_state
= NUD_REACHABLE
;
771 neigh
->updated
= jiffies
;
772 neigh_connect(neigh
);
774 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
776 NEIGH_PRINTK2("neigh %p is probed.\n", neigh
);
777 neigh
->nud_state
= NUD_PROBE
;
778 neigh
->updated
= jiffies
;
779 atomic_set(&neigh
->probes
, 0);
780 next
= now
+ neigh
->parms
->retrans_time
;
783 /* NUD_PROBE|NUD_INCOMPLETE */
784 next
= now
+ neigh
->parms
->retrans_time
;
787 if ((neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) &&
788 atomic_read(&neigh
->probes
) >= neigh_max_probes(neigh
)) {
791 neigh
->nud_state
= NUD_FAILED
;
792 neigh
->updated
= jiffies
;
794 NEIGH_CACHE_STAT_INC(neigh
->tbl
, res_failed
);
795 NEIGH_PRINTK2("neigh %p is failed.\n", neigh
);
797 /* It is very thin place. report_unreachable is very complicated
798 routine. Particularly, it can hit the same neighbour entry!
800 So that, we try to be accurate and avoid dead loop. --ANK
802 while (neigh
->nud_state
== NUD_FAILED
&&
803 (skb
= __skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
804 write_unlock(&neigh
->lock
);
805 neigh
->ops
->error_report(neigh
, skb
);
806 write_lock(&neigh
->lock
);
808 skb_queue_purge(&neigh
->arp_queue
);
811 if (neigh
->nud_state
& NUD_IN_TIMER
) {
812 if (time_before(next
, jiffies
+ HZ
/2))
813 next
= jiffies
+ HZ
/2;
814 if (!mod_timer(&neigh
->timer
, next
))
817 if (neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) {
818 struct sk_buff
*skb
= skb_peek(&neigh
->arp_queue
);
819 /* keep skb alive even if arp_queue overflows */
822 write_unlock(&neigh
->lock
);
823 neigh
->ops
->solicit(neigh
, skb
);
824 atomic_inc(&neigh
->probes
);
829 write_unlock(&neigh
->lock
);
832 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, neigh
);
835 if (notify
&& neigh
->parms
->app_probes
)
836 neigh_app_notify(neigh
);
838 neigh_release(neigh
);
841 int __neigh_event_send(struct neighbour
*neigh
, struct sk_buff
*skb
)
846 write_lock_bh(&neigh
->lock
);
849 if (neigh
->nud_state
& (NUD_CONNECTED
| NUD_DELAY
| NUD_PROBE
))
854 if (!(neigh
->nud_state
& (NUD_STALE
| NUD_INCOMPLETE
))) {
855 if (neigh
->parms
->mcast_probes
+ neigh
->parms
->app_probes
) {
856 atomic_set(&neigh
->probes
, neigh
->parms
->ucast_probes
);
857 neigh
->nud_state
= NUD_INCOMPLETE
;
858 neigh
->updated
= jiffies
;
860 neigh_add_timer(neigh
, now
+ 1);
862 neigh
->nud_state
= NUD_FAILED
;
863 neigh
->updated
= jiffies
;
864 write_unlock_bh(&neigh
->lock
);
870 } else if (neigh
->nud_state
& NUD_STALE
) {
871 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh
);
873 neigh
->nud_state
= NUD_DELAY
;
874 neigh
->updated
= jiffies
;
875 neigh_add_timer(neigh
,
876 jiffies
+ neigh
->parms
->delay_probe_time
);
879 if (neigh
->nud_state
== NUD_INCOMPLETE
) {
881 if (skb_queue_len(&neigh
->arp_queue
) >=
882 neigh
->parms
->queue_len
) {
883 struct sk_buff
*buff
;
884 buff
= neigh
->arp_queue
.next
;
885 __skb_unlink(buff
, &neigh
->arp_queue
);
888 __skb_queue_tail(&neigh
->arp_queue
, skb
);
893 write_unlock_bh(&neigh
->lock
);
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache *, struct net_device *, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags:
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
		if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
		lladdr instead of overriding it if it differs.
		It also allows the current state to be retained
		if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
		NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
		to be a router.

   Caller MUST hold a reference count on the entry.
 */
935 int neigh_update(struct neighbour
*neigh
, const u8
*lladdr
, u8
new,
941 struct net_device
*dev
;
942 int update_isrouter
= 0;
944 write_lock_bh(&neigh
->lock
);
947 old
= neigh
->nud_state
;
950 if (!(flags
& NEIGH_UPDATE_F_ADMIN
) &&
951 (old
& (NUD_NOARP
| NUD_PERMANENT
)))
954 if (!(new & NUD_VALID
)) {
955 neigh_del_timer(neigh
);
956 if (old
& NUD_CONNECTED
)
957 neigh_suspect(neigh
);
958 neigh
->nud_state
= new;
960 notify
= old
& NUD_VALID
;
964 /* Compare new lladdr with cached one */
965 if (!dev
->addr_len
) {
966 /* First case: device needs no address. */
969 /* The second case: if something is already cached
970 and a new address is proposed:
972 - if they are different, check override flag
974 if ((old
& NUD_VALID
) &&
975 !memcmp(lladdr
, neigh
->ha
, dev
->addr_len
))
978 /* No address is supplied; if we know something,
979 use it, otherwise discard the request.
982 if (!(old
& NUD_VALID
))
987 if (new & NUD_CONNECTED
)
988 neigh
->confirmed
= jiffies
;
989 neigh
->updated
= jiffies
;
991 /* If entry was valid and address is not changed,
992 do not change entry state, if new one is STALE.
995 update_isrouter
= flags
& NEIGH_UPDATE_F_OVERRIDE_ISROUTER
;
996 if (old
& NUD_VALID
) {
997 if (lladdr
!= neigh
->ha
&& !(flags
& NEIGH_UPDATE_F_OVERRIDE
)) {
999 if ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) &&
1000 (old
& NUD_CONNECTED
)) {
1006 if (lladdr
== neigh
->ha
&& new == NUD_STALE
&&
1007 ((flags
& NEIGH_UPDATE_F_WEAK_OVERRIDE
) ||
1008 (old
& NUD_CONNECTED
))
1015 neigh_del_timer(neigh
);
1016 if (new & NUD_IN_TIMER
) {
1018 neigh_add_timer(neigh
, (jiffies
+
1019 ((new & NUD_REACHABLE
) ?
1020 neigh
->parms
->reachable_time
:
1023 neigh
->nud_state
= new;
1026 if (lladdr
!= neigh
->ha
) {
1027 memcpy(&neigh
->ha
, lladdr
, dev
->addr_len
);
1028 neigh_update_hhs(neigh
);
1029 if (!(new & NUD_CONNECTED
))
1030 neigh
->confirmed
= jiffies
-
1031 (neigh
->parms
->base_reachable_time
<< 1);
1036 if (new & NUD_CONNECTED
)
1037 neigh_connect(neigh
);
1039 neigh_suspect(neigh
);
1040 if (!(old
& NUD_VALID
)) {
1041 struct sk_buff
*skb
;
1043 /* Again: avoid dead loop if something went wrong */
1045 while (neigh
->nud_state
& NUD_VALID
&&
1046 (skb
= __skb_dequeue(&neigh
->arp_queue
)) != NULL
) {
1047 struct neighbour
*n1
= neigh
;
1048 write_unlock_bh(&neigh
->lock
);
1049 /* On shaper/eql skb->dst->neighbour != neigh :( */
1050 if (skb
->dst
&& skb
->dst
->neighbour
)
1051 n1
= skb
->dst
->neighbour
;
1053 write_lock_bh(&neigh
->lock
);
1055 skb_queue_purge(&neigh
->arp_queue
);
1058 if (update_isrouter
) {
1059 neigh
->flags
= (flags
& NEIGH_UPDATE_F_ISROUTER
) ?
1060 (neigh
->flags
| NTF_ROUTER
) :
1061 (neigh
->flags
& ~NTF_ROUTER
);
1063 write_unlock_bh(&neigh
->lock
);
1066 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE
, neigh
);
1068 if (notify
&& neigh
->parms
->app_probes
)
1069 neigh_app_notify(neigh
);
1074 struct neighbour
*neigh_event_ns(struct neigh_table
*tbl
,
1075 u8
*lladdr
, void *saddr
,
1076 struct net_device
*dev
)
1078 struct neighbour
*neigh
= __neigh_lookup(tbl
, saddr
, dev
,
1079 lladdr
|| !dev
->addr_len
);
1081 neigh_update(neigh
, lladdr
, NUD_STALE
,
1082 NEIGH_UPDATE_F_OVERRIDE
);
1086 static void neigh_hh_init(struct neighbour
*n
, struct dst_entry
*dst
,
1089 struct hh_cache
*hh
;
1090 struct net_device
*dev
= dst
->dev
;
1092 for (hh
= n
->hh
; hh
; hh
= hh
->hh_next
)
1093 if (hh
->hh_type
== protocol
)
1096 if (!hh
&& (hh
= kzalloc(sizeof(*hh
), GFP_ATOMIC
)) != NULL
) {
1097 seqlock_init(&hh
->hh_lock
);
1098 hh
->hh_type
= protocol
;
1099 atomic_set(&hh
->hh_refcnt
, 0);
1101 if (dev
->hard_header_cache(n
, hh
)) {
1105 atomic_inc(&hh
->hh_refcnt
);
1106 hh
->hh_next
= n
->hh
;
1108 if (n
->nud_state
& NUD_CONNECTED
)
1109 hh
->hh_output
= n
->ops
->hh_output
;
1111 hh
->hh_output
= n
->ops
->output
;
1115 atomic_inc(&hh
->hh_refcnt
);
1120 /* This function can be used in contexts, where only old dev_queue_xmit
1121 worked, f.e. if you want to override normal output path (eql, shaper),
1122 but resolution is not made yet.
1125 int neigh_compat_output(struct sk_buff
*skb
)
1127 struct net_device
*dev
= skb
->dev
;
1129 __skb_pull(skb
, skb_network_offset(skb
));
1131 if (dev
->hard_header
&&
1132 dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
), NULL
, NULL
,
1134 dev
->rebuild_header(skb
))
1137 return dev_queue_xmit(skb
);
1140 /* Slow and careful. */
1142 int neigh_resolve_output(struct sk_buff
*skb
)
1144 struct dst_entry
*dst
= skb
->dst
;
1145 struct neighbour
*neigh
;
1148 if (!dst
|| !(neigh
= dst
->neighbour
))
1151 __skb_pull(skb
, skb_network_offset(skb
));
1153 if (!neigh_event_send(neigh
, skb
)) {
1155 struct net_device
*dev
= neigh
->dev
;
1156 if (dev
->hard_header_cache
&& !dst
->hh
) {
1157 write_lock_bh(&neigh
->lock
);
1159 neigh_hh_init(neigh
, dst
, dst
->ops
->protocol
);
1160 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
),
1161 neigh
->ha
, NULL
, skb
->len
);
1162 write_unlock_bh(&neigh
->lock
);
1164 read_lock_bh(&neigh
->lock
);
1165 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
),
1166 neigh
->ha
, NULL
, skb
->len
);
1167 read_unlock_bh(&neigh
->lock
);
1170 rc
= neigh
->ops
->queue_xmit(skb
);
1177 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1178 dst
, dst
? dst
->neighbour
: NULL
);
1185 /* As fast as possible without hh cache */
1187 int neigh_connected_output(struct sk_buff
*skb
)
1190 struct dst_entry
*dst
= skb
->dst
;
1191 struct neighbour
*neigh
= dst
->neighbour
;
1192 struct net_device
*dev
= neigh
->dev
;
1194 __skb_pull(skb
, skb_network_offset(skb
));
1196 read_lock_bh(&neigh
->lock
);
1197 err
= dev
->hard_header(skb
, dev
, ntohs(skb
->protocol
),
1198 neigh
->ha
, NULL
, skb
->len
);
1199 read_unlock_bh(&neigh
->lock
);
1201 err
= neigh
->ops
->queue_xmit(skb
);
1209 static void neigh_proxy_process(unsigned long arg
)
1211 struct neigh_table
*tbl
= (struct neigh_table
*)arg
;
1212 long sched_next
= 0;
1213 unsigned long now
= jiffies
;
1214 struct sk_buff
*skb
;
1216 spin_lock(&tbl
->proxy_queue
.lock
);
1218 skb
= tbl
->proxy_queue
.next
;
1220 while (skb
!= (struct sk_buff
*)&tbl
->proxy_queue
) {
1221 struct sk_buff
*back
= skb
;
1222 long tdif
= NEIGH_CB(back
)->sched_next
- now
;
1226 struct net_device
*dev
= back
->dev
;
1227 __skb_unlink(back
, &tbl
->proxy_queue
);
1228 if (tbl
->proxy_redo
&& netif_running(dev
))
1229 tbl
->proxy_redo(back
);
1234 } else if (!sched_next
|| tdif
< sched_next
)
1237 del_timer(&tbl
->proxy_timer
);
1239 mod_timer(&tbl
->proxy_timer
, jiffies
+ sched_next
);
1240 spin_unlock(&tbl
->proxy_queue
.lock
);
1243 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
1244 struct sk_buff
*skb
)
1246 unsigned long now
= jiffies
;
1247 unsigned long sched_next
= now
+ (net_random() % p
->proxy_delay
);
1249 if (tbl
->proxy_queue
.qlen
> p
->proxy_qlen
) {
1254 NEIGH_CB(skb
)->sched_next
= sched_next
;
1255 NEIGH_CB(skb
)->flags
|= LOCALLY_ENQUEUED
;
1257 spin_lock(&tbl
->proxy_queue
.lock
);
1258 if (del_timer(&tbl
->proxy_timer
)) {
1259 if (time_before(tbl
->proxy_timer
.expires
, sched_next
))
1260 sched_next
= tbl
->proxy_timer
.expires
;
1262 dst_release(skb
->dst
);
1265 __skb_queue_tail(&tbl
->proxy_queue
, skb
);
1266 mod_timer(&tbl
->proxy_timer
, sched_next
);
1267 spin_unlock(&tbl
->proxy_queue
.lock
);
1271 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
,
1272 struct neigh_table
*tbl
)
1274 struct neigh_parms
*p
= kmemdup(&tbl
->parms
, sizeof(*p
), GFP_KERNEL
);
1278 atomic_set(&p
->refcnt
, 1);
1279 INIT_RCU_HEAD(&p
->rcu_head
);
1281 neigh_rand_reach_time(p
->base_reachable_time
);
1283 if (dev
->neigh_setup
&& dev
->neigh_setup(dev
, p
)) {
1291 p
->sysctl_table
= NULL
;
1292 write_lock_bh(&tbl
->lock
);
1293 p
->next
= tbl
->parms
.next
;
1294 tbl
->parms
.next
= p
;
1295 write_unlock_bh(&tbl
->lock
);
1300 static void neigh_rcu_free_parms(struct rcu_head
*head
)
1302 struct neigh_parms
*parms
=
1303 container_of(head
, struct neigh_parms
, rcu_head
);
1305 neigh_parms_put(parms
);
1308 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
)
1310 struct neigh_parms
**p
;
1312 if (!parms
|| parms
== &tbl
->parms
)
1314 write_lock_bh(&tbl
->lock
);
1315 for (p
= &tbl
->parms
.next
; *p
; p
= &(*p
)->next
) {
1319 write_unlock_bh(&tbl
->lock
);
1321 dev_put(parms
->dev
);
1322 call_rcu(&parms
->rcu_head
, neigh_rcu_free_parms
);
1326 write_unlock_bh(&tbl
->lock
);
1327 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1330 void neigh_parms_destroy(struct neigh_parms
*parms
)
1335 static struct lock_class_key neigh_table_proxy_queue_class
;
1337 void neigh_table_init_no_netlink(struct neigh_table
*tbl
)
1339 unsigned long now
= jiffies
;
1340 unsigned long phsize
;
1342 atomic_set(&tbl
->parms
.refcnt
, 1);
1343 INIT_RCU_HEAD(&tbl
->parms
.rcu_head
);
1344 tbl
->parms
.reachable_time
=
1345 neigh_rand_reach_time(tbl
->parms
.base_reachable_time
);
1347 if (!tbl
->kmem_cachep
)
1349 kmem_cache_create(tbl
->id
, tbl
->entry_size
, 0,
1350 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
1352 tbl
->stats
= alloc_percpu(struct neigh_statistics
);
1354 panic("cannot create neighbour cache statistics");
1356 #ifdef CONFIG_PROC_FS
1357 tbl
->pde
= create_proc_entry(tbl
->id
, 0, proc_net_stat
);
1359 panic("cannot create neighbour proc dir entry");
1360 tbl
->pde
->proc_fops
= &neigh_stat_seq_fops
;
1361 tbl
->pde
->data
= tbl
;
1365 tbl
->hash_buckets
= neigh_hash_alloc(tbl
->hash_mask
+ 1);
1367 phsize
= (PNEIGH_HASHMASK
+ 1) * sizeof(struct pneigh_entry
*);
1368 tbl
->phash_buckets
= kzalloc(phsize
, GFP_KERNEL
);
1370 if (!tbl
->hash_buckets
|| !tbl
->phash_buckets
)
1371 panic("cannot allocate neighbour cache hashes");
1373 get_random_bytes(&tbl
->hash_rnd
, sizeof(tbl
->hash_rnd
));
1375 rwlock_init(&tbl
->lock
);
1376 init_timer(&tbl
->gc_timer
);
1377 tbl
->gc_timer
.data
= (unsigned long)tbl
;
1378 tbl
->gc_timer
.function
= neigh_periodic_timer
;
1379 tbl
->gc_timer
.expires
= now
+ 1;
1380 add_timer(&tbl
->gc_timer
);
1382 init_timer(&tbl
->proxy_timer
);
1383 tbl
->proxy_timer
.data
= (unsigned long)tbl
;
1384 tbl
->proxy_timer
.function
= neigh_proxy_process
;
1385 skb_queue_head_init_class(&tbl
->proxy_queue
,
1386 &neigh_table_proxy_queue_class
);
1388 tbl
->last_flush
= now
;
1389 tbl
->last_rand
= now
+ tbl
->parms
.reachable_time
* 20;
1392 void neigh_table_init(struct neigh_table
*tbl
)
1394 struct neigh_table
*tmp
;
1396 neigh_table_init_no_netlink(tbl
);
1397 write_lock(&neigh_tbl_lock
);
1398 for (tmp
= neigh_tables
; tmp
; tmp
= tmp
->next
) {
1399 if (tmp
->family
== tbl
->family
)
1402 tbl
->next
= neigh_tables
;
1404 write_unlock(&neigh_tbl_lock
);
1406 if (unlikely(tmp
)) {
1407 printk(KERN_ERR
"NEIGH: Registering multiple tables for "
1408 "family %d\n", tbl
->family
);
1413 int neigh_table_clear(struct neigh_table
*tbl
)
1415 struct neigh_table
**tp
;
1417 /* It is not clean... Fix it to unload IPv6 module safely */
1418 del_timer_sync(&tbl
->gc_timer
);
1419 del_timer_sync(&tbl
->proxy_timer
);
1420 pneigh_queue_purge(&tbl
->proxy_queue
);
1421 neigh_ifdown(tbl
, NULL
);
1422 if (atomic_read(&tbl
->entries
))
1423 printk(KERN_CRIT
"neighbour leakage\n");
1424 write_lock(&neigh_tbl_lock
);
1425 for (tp
= &neigh_tables
; *tp
; tp
= &(*tp
)->next
) {
1431 write_unlock(&neigh_tbl_lock
);
1433 neigh_hash_free(tbl
->hash_buckets
, tbl
->hash_mask
+ 1);
1434 tbl
->hash_buckets
= NULL
;
1436 kfree(tbl
->phash_buckets
);
1437 tbl
->phash_buckets
= NULL
;
1439 free_percpu(tbl
->stats
);
1445 static int neigh_delete(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1448 struct nlattr
*dst_attr
;
1449 struct neigh_table
*tbl
;
1450 struct net_device
*dev
= NULL
;
1453 if (nlmsg_len(nlh
) < sizeof(*ndm
))
1456 dst_attr
= nlmsg_find_attr(nlh
, sizeof(*ndm
), NDA_DST
);
1457 if (dst_attr
== NULL
)
1460 ndm
= nlmsg_data(nlh
);
1461 if (ndm
->ndm_ifindex
) {
1462 dev
= dev_get_by_index(ndm
->ndm_ifindex
);
1469 read_lock(&neigh_tbl_lock
);
1470 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1471 struct neighbour
*neigh
;
1473 if (tbl
->family
!= ndm
->ndm_family
)
1475 read_unlock(&neigh_tbl_lock
);
1477 if (nla_len(dst_attr
) < tbl
->key_len
)
1480 if (ndm
->ndm_flags
& NTF_PROXY
) {
1481 err
= pneigh_delete(tbl
, nla_data(dst_attr
), dev
);
1488 neigh
= neigh_lookup(tbl
, nla_data(dst_attr
), dev
);
1489 if (neigh
== NULL
) {
1494 err
= neigh_update(neigh
, NULL
, NUD_FAILED
,
1495 NEIGH_UPDATE_F_OVERRIDE
|
1496 NEIGH_UPDATE_F_ADMIN
);
1497 neigh_release(neigh
);
1500 read_unlock(&neigh_tbl_lock
);
1501 err
= -EAFNOSUPPORT
;
1510 static int neigh_add(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1513 struct nlattr
*tb
[NDA_MAX
+1];
1514 struct neigh_table
*tbl
;
1515 struct net_device
*dev
= NULL
;
1518 err
= nlmsg_parse(nlh
, sizeof(*ndm
), tb
, NDA_MAX
, NULL
);
1523 if (tb
[NDA_DST
] == NULL
)
1526 ndm
= nlmsg_data(nlh
);
1527 if (ndm
->ndm_ifindex
) {
1528 dev
= dev_get_by_index(ndm
->ndm_ifindex
);
1534 if (tb
[NDA_LLADDR
] && nla_len(tb
[NDA_LLADDR
]) < dev
->addr_len
)
1538 read_lock(&neigh_tbl_lock
);
1539 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1540 int flags
= NEIGH_UPDATE_F_ADMIN
| NEIGH_UPDATE_F_OVERRIDE
;
1541 struct neighbour
*neigh
;
1544 if (tbl
->family
!= ndm
->ndm_family
)
1546 read_unlock(&neigh_tbl_lock
);
1548 if (nla_len(tb
[NDA_DST
]) < tbl
->key_len
)
1550 dst
= nla_data(tb
[NDA_DST
]);
1551 lladdr
= tb
[NDA_LLADDR
] ? nla_data(tb
[NDA_LLADDR
]) : NULL
;
1553 if (ndm
->ndm_flags
& NTF_PROXY
) {
1554 struct pneigh_entry
*pn
;
1557 pn
= pneigh_lookup(tbl
, dst
, dev
, 1);
1559 pn
->flags
= ndm
->ndm_flags
;
1568 neigh
= neigh_lookup(tbl
, dst
, dev
);
1569 if (neigh
== NULL
) {
1570 if (!(nlh
->nlmsg_flags
& NLM_F_CREATE
)) {
1575 neigh
= __neigh_lookup_errno(tbl
, dst
, dev
);
1576 if (IS_ERR(neigh
)) {
1577 err
= PTR_ERR(neigh
);
1581 if (nlh
->nlmsg_flags
& NLM_F_EXCL
) {
1583 neigh_release(neigh
);
1587 if (!(nlh
->nlmsg_flags
& NLM_F_REPLACE
))
1588 flags
&= ~NEIGH_UPDATE_F_OVERRIDE
;
1591 err
= neigh_update(neigh
, lladdr
, ndm
->ndm_state
, flags
);
1592 neigh_release(neigh
);
1596 read_unlock(&neigh_tbl_lock
);
1597 err
= -EAFNOSUPPORT
;
1606 static int neightbl_fill_parms(struct sk_buff
*skb
, struct neigh_parms
*parms
)
1608 struct nlattr
*nest
;
1610 nest
= nla_nest_start(skb
, NDTA_PARMS
);
1615 NLA_PUT_U32(skb
, NDTPA_IFINDEX
, parms
->dev
->ifindex
);
1617 NLA_PUT_U32(skb
, NDTPA_REFCNT
, atomic_read(&parms
->refcnt
));
1618 NLA_PUT_U32(skb
, NDTPA_QUEUE_LEN
, parms
->queue_len
);
1619 NLA_PUT_U32(skb
, NDTPA_PROXY_QLEN
, parms
->proxy_qlen
);
1620 NLA_PUT_U32(skb
, NDTPA_APP_PROBES
, parms
->app_probes
);
1621 NLA_PUT_U32(skb
, NDTPA_UCAST_PROBES
, parms
->ucast_probes
);
1622 NLA_PUT_U32(skb
, NDTPA_MCAST_PROBES
, parms
->mcast_probes
);
1623 NLA_PUT_MSECS(skb
, NDTPA_REACHABLE_TIME
, parms
->reachable_time
);
1624 NLA_PUT_MSECS(skb
, NDTPA_BASE_REACHABLE_TIME
,
1625 parms
->base_reachable_time
);
1626 NLA_PUT_MSECS(skb
, NDTPA_GC_STALETIME
, parms
->gc_staletime
);
1627 NLA_PUT_MSECS(skb
, NDTPA_DELAY_PROBE_TIME
, parms
->delay_probe_time
);
1628 NLA_PUT_MSECS(skb
, NDTPA_RETRANS_TIME
, parms
->retrans_time
);
1629 NLA_PUT_MSECS(skb
, NDTPA_ANYCAST_DELAY
, parms
->anycast_delay
);
1630 NLA_PUT_MSECS(skb
, NDTPA_PROXY_DELAY
, parms
->proxy_delay
);
1631 NLA_PUT_MSECS(skb
, NDTPA_LOCKTIME
, parms
->locktime
);
1633 return nla_nest_end(skb
, nest
);
1636 return nla_nest_cancel(skb
, nest
);
1639 static int neightbl_fill_info(struct sk_buff
*skb
, struct neigh_table
*tbl
,
1640 u32 pid
, u32 seq
, int type
, int flags
)
1642 struct nlmsghdr
*nlh
;
1643 struct ndtmsg
*ndtmsg
;
1645 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1649 ndtmsg
= nlmsg_data(nlh
);
1651 read_lock_bh(&tbl
->lock
);
1652 ndtmsg
->ndtm_family
= tbl
->family
;
1653 ndtmsg
->ndtm_pad1
= 0;
1654 ndtmsg
->ndtm_pad2
= 0;
1656 NLA_PUT_STRING(skb
, NDTA_NAME
, tbl
->id
);
1657 NLA_PUT_MSECS(skb
, NDTA_GC_INTERVAL
, tbl
->gc_interval
);
1658 NLA_PUT_U32(skb
, NDTA_THRESH1
, tbl
->gc_thresh1
);
1659 NLA_PUT_U32(skb
, NDTA_THRESH2
, tbl
->gc_thresh2
);
1660 NLA_PUT_U32(skb
, NDTA_THRESH3
, tbl
->gc_thresh3
);
1663 unsigned long now
= jiffies
;
1664 unsigned int flush_delta
= now
- tbl
->last_flush
;
1665 unsigned int rand_delta
= now
- tbl
->last_rand
;
1667 struct ndt_config ndc
= {
1668 .ndtc_key_len
= tbl
->key_len
,
1669 .ndtc_entry_size
= tbl
->entry_size
,
1670 .ndtc_entries
= atomic_read(&tbl
->entries
),
1671 .ndtc_last_flush
= jiffies_to_msecs(flush_delta
),
1672 .ndtc_last_rand
= jiffies_to_msecs(rand_delta
),
1673 .ndtc_hash_rnd
= tbl
->hash_rnd
,
1674 .ndtc_hash_mask
= tbl
->hash_mask
,
1675 .ndtc_hash_chain_gc
= tbl
->hash_chain_gc
,
1676 .ndtc_proxy_qlen
= tbl
->proxy_queue
.qlen
,
1679 NLA_PUT(skb
, NDTA_CONFIG
, sizeof(ndc
), &ndc
);
1684 struct ndt_stats ndst
;
1686 memset(&ndst
, 0, sizeof(ndst
));
1688 for_each_possible_cpu(cpu
) {
1689 struct neigh_statistics
*st
;
1691 st
= per_cpu_ptr(tbl
->stats
, cpu
);
1692 ndst
.ndts_allocs
+= st
->allocs
;
1693 ndst
.ndts_destroys
+= st
->destroys
;
1694 ndst
.ndts_hash_grows
+= st
->hash_grows
;
1695 ndst
.ndts_res_failed
+= st
->res_failed
;
1696 ndst
.ndts_lookups
+= st
->lookups
;
1697 ndst
.ndts_hits
+= st
->hits
;
1698 ndst
.ndts_rcv_probes_mcast
+= st
->rcv_probes_mcast
;
1699 ndst
.ndts_rcv_probes_ucast
+= st
->rcv_probes_ucast
;
1700 ndst
.ndts_periodic_gc_runs
+= st
->periodic_gc_runs
;
1701 ndst
.ndts_forced_gc_runs
+= st
->forced_gc_runs
;
1704 NLA_PUT(skb
, NDTA_STATS
, sizeof(ndst
), &ndst
);
1707 BUG_ON(tbl
->parms
.dev
);
1708 if (neightbl_fill_parms(skb
, &tbl
->parms
) < 0)
1709 goto nla_put_failure
;
1711 read_unlock_bh(&tbl
->lock
);
1712 return nlmsg_end(skb
, nlh
);
1715 read_unlock_bh(&tbl
->lock
);
1716 nlmsg_cancel(skb
, nlh
);
1720 static int neightbl_fill_param_info(struct sk_buff
*skb
,
1721 struct neigh_table
*tbl
,
1722 struct neigh_parms
*parms
,
1723 u32 pid
, u32 seq
, int type
,
1726 struct ndtmsg
*ndtmsg
;
1727 struct nlmsghdr
*nlh
;
1729 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndtmsg
), flags
);
1733 ndtmsg
= nlmsg_data(nlh
);
1735 read_lock_bh(&tbl
->lock
);
1736 ndtmsg
->ndtm_family
= tbl
->family
;
1737 ndtmsg
->ndtm_pad1
= 0;
1738 ndtmsg
->ndtm_pad2
= 0;
1740 if (nla_put_string(skb
, NDTA_NAME
, tbl
->id
) < 0 ||
1741 neightbl_fill_parms(skb
, parms
) < 0)
1744 read_unlock_bh(&tbl
->lock
);
1745 return nlmsg_end(skb
, nlh
);
1747 read_unlock_bh(&tbl
->lock
);
1748 nlmsg_cancel(skb
, nlh
);
1752 static inline struct neigh_parms
*lookup_neigh_params(struct neigh_table
*tbl
,
1755 struct neigh_parms
*p
;
1757 for (p
= &tbl
->parms
; p
; p
= p
->next
)
1758 if ((p
->dev
&& p
->dev
->ifindex
== ifindex
) ||
1759 (!p
->dev
&& !ifindex
))
1765 static const struct nla_policy nl_neightbl_policy
[NDTA_MAX
+1] = {
1766 [NDTA_NAME
] = { .type
= NLA_STRING
},
1767 [NDTA_THRESH1
] = { .type
= NLA_U32
},
1768 [NDTA_THRESH2
] = { .type
= NLA_U32
},
1769 [NDTA_THRESH3
] = { .type
= NLA_U32
},
1770 [NDTA_GC_INTERVAL
] = { .type
= NLA_U64
},
1771 [NDTA_PARMS
] = { .type
= NLA_NESTED
},
1774 static const struct nla_policy nl_ntbl_parm_policy
[NDTPA_MAX
+1] = {
1775 [NDTPA_IFINDEX
] = { .type
= NLA_U32
},
1776 [NDTPA_QUEUE_LEN
] = { .type
= NLA_U32
},
1777 [NDTPA_PROXY_QLEN
] = { .type
= NLA_U32
},
1778 [NDTPA_APP_PROBES
] = { .type
= NLA_U32
},
1779 [NDTPA_UCAST_PROBES
] = { .type
= NLA_U32
},
1780 [NDTPA_MCAST_PROBES
] = { .type
= NLA_U32
},
1781 [NDTPA_BASE_REACHABLE_TIME
] = { .type
= NLA_U64
},
1782 [NDTPA_GC_STALETIME
] = { .type
= NLA_U64
},
1783 [NDTPA_DELAY_PROBE_TIME
] = { .type
= NLA_U64
},
1784 [NDTPA_RETRANS_TIME
] = { .type
= NLA_U64
},
1785 [NDTPA_ANYCAST_DELAY
] = { .type
= NLA_U64
},
1786 [NDTPA_PROXY_DELAY
] = { .type
= NLA_U64
},
1787 [NDTPA_LOCKTIME
] = { .type
= NLA_U64
},
1790 static int neightbl_set(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
1792 struct neigh_table
*tbl
;
1793 struct ndtmsg
*ndtmsg
;
1794 struct nlattr
*tb
[NDTA_MAX
+1];
1797 err
= nlmsg_parse(nlh
, sizeof(*ndtmsg
), tb
, NDTA_MAX
,
1798 nl_neightbl_policy
);
1802 if (tb
[NDTA_NAME
] == NULL
) {
1807 ndtmsg
= nlmsg_data(nlh
);
1808 read_lock(&neigh_tbl_lock
);
1809 for (tbl
= neigh_tables
; tbl
; tbl
= tbl
->next
) {
1810 if (ndtmsg
->ndtm_family
&& tbl
->family
!= ndtmsg
->ndtm_family
)
1813 if (nla_strcmp(tb
[NDTA_NAME
], tbl
->id
) == 0)
1823 * We acquire tbl->lock to be nice to the periodic timers and
1824 * make sure they always see a consistent set of values.
1826 write_lock_bh(&tbl
->lock
);
1828 if (tb
[NDTA_PARMS
]) {
1829 struct nlattr
*tbp
[NDTPA_MAX
+1];
1830 struct neigh_parms
*p
;
1833 err
= nla_parse_nested(tbp
, NDTPA_MAX
, tb
[NDTA_PARMS
],
1834 nl_ntbl_parm_policy
);
1836 goto errout_tbl_lock
;
1838 if (tbp
[NDTPA_IFINDEX
])
1839 ifindex
= nla_get_u32(tbp
[NDTPA_IFINDEX
]);
1841 p
= lookup_neigh_params(tbl
, ifindex
);
1844 goto errout_tbl_lock
;
1847 for (i
= 1; i
<= NDTPA_MAX
; i
++) {
1852 case NDTPA_QUEUE_LEN
:
1853 p
->queue_len
= nla_get_u32(tbp
[i
]);
1855 case NDTPA_PROXY_QLEN
:
1856 p
->proxy_qlen
= nla_get_u32(tbp
[i
]);
1858 case NDTPA_APP_PROBES
:
1859 p
->app_probes
= nla_get_u32(tbp
[i
]);
1861 case NDTPA_UCAST_PROBES
:
1862 p
->ucast_probes
= nla_get_u32(tbp
[i
]);
1864 case NDTPA_MCAST_PROBES
:
1865 p
->mcast_probes
= nla_get_u32(tbp
[i
]);
1867 case NDTPA_BASE_REACHABLE_TIME
:
1868 p
->base_reachable_time
= nla_get_msecs(tbp
[i
]);
1870 case NDTPA_GC_STALETIME
:
1871 p
->gc_staletime
= nla_get_msecs(tbp
[i
]);
1873 case NDTPA_DELAY_PROBE_TIME
:
1874 p
->delay_probe_time
= nla_get_msecs(tbp
[i
]);
1876 case NDTPA_RETRANS_TIME
:
1877 p
->retrans_time
= nla_get_msecs(tbp
[i
]);
1879 case NDTPA_ANYCAST_DELAY
:
1880 p
->anycast_delay
= nla_get_msecs(tbp
[i
]);
1882 case NDTPA_PROXY_DELAY
:
1883 p
->proxy_delay
= nla_get_msecs(tbp
[i
]);
1885 case NDTPA_LOCKTIME
:
1886 p
->locktime
= nla_get_msecs(tbp
[i
]);
1892 if (tb
[NDTA_THRESH1
])
1893 tbl
->gc_thresh1
= nla_get_u32(tb
[NDTA_THRESH1
]);
1895 if (tb
[NDTA_THRESH2
])
1896 tbl
->gc_thresh2
= nla_get_u32(tb
[NDTA_THRESH2
]);
1898 if (tb
[NDTA_THRESH3
])
1899 tbl
->gc_thresh3
= nla_get_u32(tb
[NDTA_THRESH3
]);
1901 if (tb
[NDTA_GC_INTERVAL
])
1902 tbl
->gc_interval
= nla_get_msecs(tb
[NDTA_GC_INTERVAL
]);
1907 write_unlock_bh(&tbl
->lock
);
1909 read_unlock(&neigh_tbl_lock
);
1914 static int neightbl_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1916 int family
, tidx
, nidx
= 0;
1917 int tbl_skip
= cb
->args
[0];
1918 int neigh_skip
= cb
->args
[1];
1919 struct neigh_table
*tbl
;
1921 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
1923 read_lock(&neigh_tbl_lock
);
1924 for (tbl
= neigh_tables
, tidx
= 0; tbl
; tbl
= tbl
->next
, tidx
++) {
1925 struct neigh_parms
*p
;
1927 if (tidx
< tbl_skip
|| (family
&& tbl
->family
!= family
))
1930 if (neightbl_fill_info(skb
, tbl
, NETLINK_CB(cb
->skb
).pid
,
1931 cb
->nlh
->nlmsg_seq
, RTM_NEWNEIGHTBL
,
1935 for (nidx
= 0, p
= tbl
->parms
.next
; p
; p
= p
->next
, nidx
++) {
1936 if (nidx
< neigh_skip
)
1939 if (neightbl_fill_param_info(skb
, tbl
, p
,
1940 NETLINK_CB(cb
->skb
).pid
,
1950 read_unlock(&neigh_tbl_lock
);
1957 static int neigh_fill_info(struct sk_buff
*skb
, struct neighbour
*neigh
,
1958 u32 pid
, u32 seq
, int type
, unsigned int flags
)
1960 unsigned long now
= jiffies
;
1961 struct nda_cacheinfo ci
;
1962 struct nlmsghdr
*nlh
;
1965 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ndm
), flags
);
1969 ndm
= nlmsg_data(nlh
);
1970 ndm
->ndm_family
= neigh
->ops
->family
;
1973 ndm
->ndm_flags
= neigh
->flags
;
1974 ndm
->ndm_type
= neigh
->type
;
1975 ndm
->ndm_ifindex
= neigh
->dev
->ifindex
;
1977 NLA_PUT(skb
, NDA_DST
, neigh
->tbl
->key_len
, neigh
->primary_key
);
1979 read_lock_bh(&neigh
->lock
);
1980 ndm
->ndm_state
= neigh
->nud_state
;
1981 if ((neigh
->nud_state
& NUD_VALID
) &&
1982 nla_put(skb
, NDA_LLADDR
, neigh
->dev
->addr_len
, neigh
->ha
) < 0) {
1983 read_unlock_bh(&neigh
->lock
);
1984 goto nla_put_failure
;
1987 ci
.ndm_used
= now
- neigh
->used
;
1988 ci
.ndm_confirmed
= now
- neigh
->confirmed
;
1989 ci
.ndm_updated
= now
- neigh
->updated
;
1990 ci
.ndm_refcnt
= atomic_read(&neigh
->refcnt
) - 1;
1991 read_unlock_bh(&neigh
->lock
);
1993 NLA_PUT_U32(skb
, NDA_PROBES
, atomic_read(&neigh
->probes
));
1994 NLA_PUT(skb
, NDA_CACHEINFO
, sizeof(ci
), &ci
);
1996 return nlmsg_end(skb
, nlh
);
1999 nlmsg_cancel(skb
, nlh
);
2004 static int neigh_dump_table(struct neigh_table
*tbl
, struct sk_buff
*skb
,
2005 struct netlink_callback
*cb
)
2007 struct neighbour
*n
;
2008 int rc
, h
, s_h
= cb
->args
[1];
2009 int idx
, s_idx
= idx
= cb
->args
[2];
2011 read_lock_bh(&tbl
->lock
);
2012 for (h
= 0; h
<= tbl
->hash_mask
; h
++) {
2017 for (n
= tbl
->hash_buckets
[h
], idx
= 0; n
; n
= n
->next
, idx
++) {
2020 if (neigh_fill_info(skb
, n
, NETLINK_CB(cb
->skb
).pid
,
2023 NLM_F_MULTI
) <= 0) {
2024 read_unlock_bh(&tbl
->lock
);
2030 read_unlock_bh(&tbl
->lock
);
2038 static int neigh_dump_info(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2040 struct neigh_table
*tbl
;
2043 read_lock(&neigh_tbl_lock
);
2044 family
= ((struct rtgenmsg
*) nlmsg_data(cb
->nlh
))->rtgen_family
;
2047 for (tbl
= neigh_tables
, t
= 0; tbl
; tbl
= tbl
->next
, t
++) {
2048 if (t
< s_t
|| (family
&& tbl
->family
!= family
))
2051 memset(&cb
->args
[1], 0, sizeof(cb
->args
) -
2052 sizeof(cb
->args
[0]));
2053 if (neigh_dump_table(tbl
, skb
, cb
) < 0)
2056 read_unlock(&neigh_tbl_lock
);
2062 void neigh_for_each(struct neigh_table
*tbl
, void (*cb
)(struct neighbour
*, void *), void *cookie
)
2066 read_lock_bh(&tbl
->lock
);
2067 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2068 struct neighbour
*n
;
2070 for (n
= tbl
->hash_buckets
[chain
]; n
; n
= n
->next
)
2073 read_unlock_bh(&tbl
->lock
);
2075 EXPORT_SYMBOL(neigh_for_each
);
2077 /* The tbl->lock must be held as a writer and BH disabled. */
2078 void __neigh_for_each_release(struct neigh_table
*tbl
,
2079 int (*cb
)(struct neighbour
*))
2083 for (chain
= 0; chain
<= tbl
->hash_mask
; chain
++) {
2084 struct neighbour
*n
, **np
;
2086 np
= &tbl
->hash_buckets
[chain
];
2087 while ((n
= *np
) != NULL
) {
2090 write_lock(&n
->lock
);
2097 write_unlock(&n
->lock
);
2099 if (n
->parms
->neigh_cleanup
)
2100 n
->parms
->neigh_cleanup(n
);
2106 EXPORT_SYMBOL(__neigh_for_each_release
);
2108 #ifdef CONFIG_PROC_FS
2110 static struct neighbour
*neigh_get_first(struct seq_file
*seq
)
2112 struct neigh_seq_state
*state
= seq
->private;
2113 struct neigh_table
*tbl
= state
->tbl
;
2114 struct neighbour
*n
= NULL
;
2115 int bucket
= state
->bucket
;
2117 state
->flags
&= ~NEIGH_SEQ_IS_PNEIGH
;
2118 for (bucket
= 0; bucket
<= tbl
->hash_mask
; bucket
++) {
2119 n
= tbl
->hash_buckets
[bucket
];
2122 if (state
->neigh_sub_iter
) {
2126 v
= state
->neigh_sub_iter(state
, n
, &fakep
);
2130 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2132 if (n
->nud_state
& ~NUD_NOARP
)
2141 state
->bucket
= bucket
;
2146 static struct neighbour
*neigh_get_next(struct seq_file
*seq
,
2147 struct neighbour
*n
,
2150 struct neigh_seq_state
*state
= seq
->private;
2151 struct neigh_table
*tbl
= state
->tbl
;
2153 if (state
->neigh_sub_iter
) {
2154 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2162 if (state
->neigh_sub_iter
) {
2163 void *v
= state
->neigh_sub_iter(state
, n
, pos
);
2168 if (!(state
->flags
& NEIGH_SEQ_SKIP_NOARP
))
2171 if (n
->nud_state
& ~NUD_NOARP
)
2180 if (++state
->bucket
> tbl
->hash_mask
)
2183 n
= tbl
->hash_buckets
[state
->bucket
];
2191 static struct neighbour
*neigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2193 struct neighbour
*n
= neigh_get_first(seq
);
2197 n
= neigh_get_next(seq
, n
, pos
);
2202 return *pos
? NULL
: n
;
2205 static struct pneigh_entry
*pneigh_get_first(struct seq_file
*seq
)
2207 struct neigh_seq_state
*state
= seq
->private;
2208 struct neigh_table
*tbl
= state
->tbl
;
2209 struct pneigh_entry
*pn
= NULL
;
2210 int bucket
= state
->bucket
;
2212 state
->flags
|= NEIGH_SEQ_IS_PNEIGH
;
2213 for (bucket
= 0; bucket
<= PNEIGH_HASHMASK
; bucket
++) {
2214 pn
= tbl
->phash_buckets
[bucket
];
2218 state
->bucket
= bucket
;
2223 static struct pneigh_entry
*pneigh_get_next(struct seq_file
*seq
,
2224 struct pneigh_entry
*pn
,
2227 struct neigh_seq_state
*state
= seq
->private;
2228 struct neigh_table
*tbl
= state
->tbl
;
2232 if (++state
->bucket
> PNEIGH_HASHMASK
)
2234 pn
= tbl
->phash_buckets
[state
->bucket
];
2245 static struct pneigh_entry
*pneigh_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2247 struct pneigh_entry
*pn
= pneigh_get_first(seq
);
2251 pn
= pneigh_get_next(seq
, pn
, pos
);
2256 return *pos
? NULL
: pn
;
2259 static void *neigh_get_idx_any(struct seq_file
*seq
, loff_t
*pos
)
2261 struct neigh_seq_state
*state
= seq
->private;
2264 rc
= neigh_get_idx(seq
, pos
);
2265 if (!rc
&& !(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2266 rc
= pneigh_get_idx(seq
, pos
);
2271 void *neigh_seq_start(struct seq_file
*seq
, loff_t
*pos
, struct neigh_table
*tbl
, unsigned int neigh_seq_flags
)
2273 struct neigh_seq_state
*state
= seq
->private;
2274 loff_t pos_minus_one
;
2278 state
->flags
= (neigh_seq_flags
& ~NEIGH_SEQ_IS_PNEIGH
);
2280 read_lock_bh(&tbl
->lock
);
2282 pos_minus_one
= *pos
- 1;
2283 return *pos
? neigh_get_idx_any(seq
, &pos_minus_one
) : SEQ_START_TOKEN
;
2285 EXPORT_SYMBOL(neigh_seq_start
);
2287 void *neigh_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2289 struct neigh_seq_state
*state
;
2292 if (v
== SEQ_START_TOKEN
) {
2293 rc
= neigh_get_idx(seq
, pos
);
2297 state
= seq
->private;
2298 if (!(state
->flags
& NEIGH_SEQ_IS_PNEIGH
)) {
2299 rc
= neigh_get_next(seq
, v
, NULL
);
2302 if (!(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
))
2303 rc
= pneigh_get_first(seq
);
2305 BUG_ON(state
->flags
& NEIGH_SEQ_NEIGH_ONLY
);
2306 rc
= pneigh_get_next(seq
, v
, NULL
);
2312 EXPORT_SYMBOL(neigh_seq_next
);
2314 void neigh_seq_stop(struct seq_file
*seq
, void *v
)
2316 struct neigh_seq_state
*state
= seq
->private;
2317 struct neigh_table
*tbl
= state
->tbl
;
2319 read_unlock_bh(&tbl
->lock
);
2321 EXPORT_SYMBOL(neigh_seq_stop
);
2323 /* statistics via seq_file */
2325 static void *neigh_stat_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2327 struct proc_dir_entry
*pde
= seq
->private;
2328 struct neigh_table
*tbl
= pde
->data
;
2332 return SEQ_START_TOKEN
;
2334 for (cpu
= *pos
-1; cpu
< NR_CPUS
; ++cpu
) {
2335 if (!cpu_possible(cpu
))
2338 return per_cpu_ptr(tbl
->stats
, cpu
);
2343 static void *neigh_stat_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2345 struct proc_dir_entry
*pde
= seq
->private;
2346 struct neigh_table
*tbl
= pde
->data
;
2349 for (cpu
= *pos
; cpu
< NR_CPUS
; ++cpu
) {
2350 if (!cpu_possible(cpu
))
2353 return per_cpu_ptr(tbl
->stats
, cpu
);
2358 static void neigh_stat_seq_stop(struct seq_file
*seq
, void *v
)
2363 static int neigh_stat_seq_show(struct seq_file
*seq
, void *v
)
2365 struct proc_dir_entry
*pde
= seq
->private;
2366 struct neigh_table
*tbl
= pde
->data
;
2367 struct neigh_statistics
*st
= v
;
2369 if (v
== SEQ_START_TOKEN
) {
2370 seq_printf(seq
, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2374 seq_printf(seq
, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2375 "%08lx %08lx %08lx %08lx\n",
2376 atomic_read(&tbl
->entries
),
2387 st
->rcv_probes_mcast
,
2388 st
->rcv_probes_ucast
,
2390 st
->periodic_gc_runs
,
2397 static const struct seq_operations neigh_stat_seq_ops
= {
2398 .start
= neigh_stat_seq_start
,
2399 .next
= neigh_stat_seq_next
,
2400 .stop
= neigh_stat_seq_stop
,
2401 .show
= neigh_stat_seq_show
,
2404 static int neigh_stat_seq_open(struct inode
*inode
, struct file
*file
)
2406 int ret
= seq_open(file
, &neigh_stat_seq_ops
);
2409 struct seq_file
*sf
= file
->private_data
;
2410 sf
->private = PDE(inode
);
2415 static const struct file_operations neigh_stat_seq_fops
= {
2416 .owner
= THIS_MODULE
,
2417 .open
= neigh_stat_seq_open
,
2419 .llseek
= seq_lseek
,
2420 .release
= seq_release
,
2423 #endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4);  /* NDA_PROBES */
}
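/*
 * Worked example of the size computed above, assuming MAX_ADDR_LEN = 32,
 * a 12-byte struct ndmsg and the usual 4-byte netlink attribute header
 * (all of these are assumptions about the surrounding headers):
 *
 *   NLMSG_ALIGN(12)     = 12   message payload header
 *   nla_total_size(32)  = 36   NDA_DST
 *   nla_total_size(32)  = 36   NDA_LLADDR
 *   nla_total_size(16)  = 20   NDA_CACHEINFO (4 x u32)
 *   nla_total_size(4)   =  8   NDA_PROBES
 *                         ---
 *                         112  bytes per notification skb
 */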
2435 static void __neigh_notify(struct neighbour
*n
, int type
, int flags
)
2437 struct sk_buff
*skb
;
2440 skb
= nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC
);
2444 err
= neigh_fill_info(skb
, n
, 0, 0, type
, flags
);
2446 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2447 WARN_ON(err
== -EMSGSIZE
);
2451 err
= rtnl_notify(skb
, 0, RTNLGRP_NEIGH
, NULL
, GFP_ATOMIC
);
2454 rtnl_set_sk_err(RTNLGRP_NEIGH
, err
);
2457 void neigh_app_ns(struct neighbour
*n
)
2459 __neigh_notify(n
, RTM_GETNEIGH
, NLM_F_REQUEST
);
2462 static void neigh_app_notify(struct neighbour
*n
)
2464 __neigh_notify(n
, RTM_NEWNEIGH
, 0);
2467 #endif /* CONFIG_ARPD */
2469 #ifdef CONFIG_SYSCTL
2471 static struct neigh_sysctl_table
{
2472 struct ctl_table_header
*sysctl_header
;
2473 ctl_table neigh_vars
[__NET_NEIGH_MAX
];
2474 ctl_table neigh_dev
[2];
2475 ctl_table neigh_neigh_dir
[2];
2476 ctl_table neigh_proto_dir
[2];
2477 ctl_table neigh_root_dir
[2];
2478 } neigh_sysctl_template __read_mostly
= {
2481 .ctl_name
= NET_NEIGH_MCAST_SOLICIT
,
2482 .procname
= "mcast_solicit",
2483 .maxlen
= sizeof(int),
2485 .proc_handler
= &proc_dointvec
,
2488 .ctl_name
= NET_NEIGH_UCAST_SOLICIT
,
2489 .procname
= "ucast_solicit",
2490 .maxlen
= sizeof(int),
2492 .proc_handler
= &proc_dointvec
,
2495 .ctl_name
= NET_NEIGH_APP_SOLICIT
,
2496 .procname
= "app_solicit",
2497 .maxlen
= sizeof(int),
2499 .proc_handler
= &proc_dointvec
,
2502 .ctl_name
= NET_NEIGH_RETRANS_TIME
,
2503 .procname
= "retrans_time",
2504 .maxlen
= sizeof(int),
2506 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2509 .ctl_name
= NET_NEIGH_REACHABLE_TIME
,
2510 .procname
= "base_reachable_time",
2511 .maxlen
= sizeof(int),
2513 .proc_handler
= &proc_dointvec_jiffies
,
2514 .strategy
= &sysctl_jiffies
,
2517 .ctl_name
= NET_NEIGH_DELAY_PROBE_TIME
,
2518 .procname
= "delay_first_probe_time",
2519 .maxlen
= sizeof(int),
2521 .proc_handler
= &proc_dointvec_jiffies
,
2522 .strategy
= &sysctl_jiffies
,
2525 .ctl_name
= NET_NEIGH_GC_STALE_TIME
,
2526 .procname
= "gc_stale_time",
2527 .maxlen
= sizeof(int),
2529 .proc_handler
= &proc_dointvec_jiffies
,
2530 .strategy
= &sysctl_jiffies
,
2533 .ctl_name
= NET_NEIGH_UNRES_QLEN
,
2534 .procname
= "unres_qlen",
2535 .maxlen
= sizeof(int),
2537 .proc_handler
= &proc_dointvec
,
2540 .ctl_name
= NET_NEIGH_PROXY_QLEN
,
2541 .procname
= "proxy_qlen",
2542 .maxlen
= sizeof(int),
2544 .proc_handler
= &proc_dointvec
,
2547 .ctl_name
= NET_NEIGH_ANYCAST_DELAY
,
2548 .procname
= "anycast_delay",
2549 .maxlen
= sizeof(int),
2551 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2554 .ctl_name
= NET_NEIGH_PROXY_DELAY
,
2555 .procname
= "proxy_delay",
2556 .maxlen
= sizeof(int),
2558 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2561 .ctl_name
= NET_NEIGH_LOCKTIME
,
2562 .procname
= "locktime",
2563 .maxlen
= sizeof(int),
2565 .proc_handler
= &proc_dointvec_userhz_jiffies
,
2568 .ctl_name
= NET_NEIGH_GC_INTERVAL
,
2569 .procname
= "gc_interval",
2570 .maxlen
= sizeof(int),
2572 .proc_handler
= &proc_dointvec_jiffies
,
2573 .strategy
= &sysctl_jiffies
,
2576 .ctl_name
= NET_NEIGH_GC_THRESH1
,
2577 .procname
= "gc_thresh1",
2578 .maxlen
= sizeof(int),
2580 .proc_handler
= &proc_dointvec
,
2583 .ctl_name
= NET_NEIGH_GC_THRESH2
,
2584 .procname
= "gc_thresh2",
2585 .maxlen
= sizeof(int),
2587 .proc_handler
= &proc_dointvec
,
2590 .ctl_name
= NET_NEIGH_GC_THRESH3
,
2591 .procname
= "gc_thresh3",
2592 .maxlen
= sizeof(int),
2594 .proc_handler
= &proc_dointvec
,
2597 .ctl_name
= NET_NEIGH_RETRANS_TIME_MS
,
2598 .procname
= "retrans_time_ms",
2599 .maxlen
= sizeof(int),
2601 .proc_handler
= &proc_dointvec_ms_jiffies
,
2602 .strategy
= &sysctl_ms_jiffies
,
2605 .ctl_name
= NET_NEIGH_REACHABLE_TIME_MS
,
2606 .procname
= "base_reachable_time_ms",
2607 .maxlen
= sizeof(int),
2609 .proc_handler
= &proc_dointvec_ms_jiffies
,
2610 .strategy
= &sysctl_ms_jiffies
,
2615 .ctl_name
= NET_PROTO_CONF_DEFAULT
,
2616 .procname
= "default",
2620 .neigh_neigh_dir
= {
2622 .procname
= "neigh",
2626 .neigh_proto_dir
= {
2633 .ctl_name
= CTL_NET
,
2640 int neigh_sysctl_register(struct net_device
*dev
, struct neigh_parms
*p
,
2641 int p_id
, int pdev_id
, char *p_name
,
2642 proc_handler
*handler
, ctl_handler
*strategy
)
2644 struct neigh_sysctl_table
*t
= kmemdup(&neigh_sysctl_template
,
2645 sizeof(*t
), GFP_KERNEL
);
2646 const char *dev_name_source
= NULL
;
2647 char *dev_name
= NULL
;
2652 t
->neigh_vars
[0].data
= &p
->mcast_probes
;
2653 t
->neigh_vars
[1].data
= &p
->ucast_probes
;
2654 t
->neigh_vars
[2].data
= &p
->app_probes
;
2655 t
->neigh_vars
[3].data
= &p
->retrans_time
;
2656 t
->neigh_vars
[4].data
= &p
->base_reachable_time
;
2657 t
->neigh_vars
[5].data
= &p
->delay_probe_time
;
2658 t
->neigh_vars
[6].data
= &p
->gc_staletime
;
2659 t
->neigh_vars
[7].data
= &p
->queue_len
;
2660 t
->neigh_vars
[8].data
= &p
->proxy_qlen
;
2661 t
->neigh_vars
[9].data
= &p
->anycast_delay
;
2662 t
->neigh_vars
[10].data
= &p
->proxy_delay
;
2663 t
->neigh_vars
[11].data
= &p
->locktime
;
2666 dev_name_source
= dev
->name
;
2667 t
->neigh_dev
[0].ctl_name
= dev
->ifindex
;
2668 t
->neigh_vars
[12].procname
= NULL
;
2669 t
->neigh_vars
[13].procname
= NULL
;
2670 t
->neigh_vars
[14].procname
= NULL
;
2671 t
->neigh_vars
[15].procname
= NULL
;
2673 dev_name_source
= t
->neigh_dev
[0].procname
;
2674 t
->neigh_vars
[12].data
= (int *)(p
+ 1);
2675 t
->neigh_vars
[13].data
= (int *)(p
+ 1) + 1;
2676 t
->neigh_vars
[14].data
= (int *)(p
+ 1) + 2;
2677 t
->neigh_vars
[15].data
= (int *)(p
+ 1) + 3;
2680 t
->neigh_vars
[16].data
= &p
->retrans_time
;
2681 t
->neigh_vars
[17].data
= &p
->base_reachable_time
;
2683 if (handler
|| strategy
) {
2685 t
->neigh_vars
[3].proc_handler
= handler
;
2686 t
->neigh_vars
[3].strategy
= strategy
;
2687 t
->neigh_vars
[3].extra1
= dev
;
2689 t
->neigh_vars
[4].proc_handler
= handler
;
2690 t
->neigh_vars
[4].strategy
= strategy
;
2691 t
->neigh_vars
[4].extra1
= dev
;
2692 /* RetransTime (in milliseconds)*/
2693 t
->neigh_vars
[16].proc_handler
= handler
;
2694 t
->neigh_vars
[16].strategy
= strategy
;
2695 t
->neigh_vars
[16].extra1
= dev
;
2696 /* ReachableTime (in milliseconds) */
2697 t
->neigh_vars
[17].proc_handler
= handler
;
2698 t
->neigh_vars
[17].strategy
= strategy
;
2699 t
->neigh_vars
[17].extra1
= dev
;
2702 dev_name
= kstrdup(dev_name_source
, GFP_KERNEL
);
2708 t
->neigh_dev
[0].procname
= dev_name
;
2710 t
->neigh_neigh_dir
[0].ctl_name
= pdev_id
;
2712 t
->neigh_proto_dir
[0].procname
= p_name
;
2713 t
->neigh_proto_dir
[0].ctl_name
= p_id
;
2715 t
->neigh_dev
[0].child
= t
->neigh_vars
;
2716 t
->neigh_neigh_dir
[0].child
= t
->neigh_dev
;
2717 t
->neigh_proto_dir
[0].child
= t
->neigh_neigh_dir
;
2718 t
->neigh_root_dir
[0].child
= t
->neigh_proto_dir
;
2720 t
->sysctl_header
= register_sysctl_table(t
->neigh_root_dir
);
2721 if (!t
->sysctl_header
) {
2725 p
->sysctl_table
= t
;
2737 void neigh_sysctl_unregister(struct neigh_parms
*p
)
2739 if (p
->sysctl_table
) {
2740 struct neigh_sysctl_table
*t
= p
->sysctl_table
;
2741 p
->sysctl_table
= NULL
;
2742 unregister_sysctl_table(t
->sysctl_header
);
2743 kfree(t
->neigh_dev
[0].procname
);
2748 #endif /* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);
);
2765 EXPORT_SYMBOL(neigh_changeaddr
);
2766 EXPORT_SYMBOL(neigh_compat_output
);
2767 EXPORT_SYMBOL(neigh_connected_output
);
2768 EXPORT_SYMBOL(neigh_create
);
2769 EXPORT_SYMBOL(neigh_destroy
);
2770 EXPORT_SYMBOL(neigh_event_ns
);
2771 EXPORT_SYMBOL(neigh_ifdown
);
2772 EXPORT_SYMBOL(neigh_lookup
);
2773 EXPORT_SYMBOL(neigh_lookup_nodev
);
2774 EXPORT_SYMBOL(neigh_parms_alloc
);
2775 EXPORT_SYMBOL(neigh_parms_release
);
2776 EXPORT_SYMBOL(neigh_rand_reach_time
);
2777 EXPORT_SYMBOL(neigh_resolve_output
);
2778 EXPORT_SYMBOL(neigh_table_clear
);
2779 EXPORT_SYMBOL(neigh_table_init
);
2780 EXPORT_SYMBOL(neigh_table_init_no_netlink
);
2781 EXPORT_SYMBOL(neigh_update
);
2782 EXPORT_SYMBOL(pneigh_enqueue
);
2783 EXPORT_SYMBOL(pneigh_lookup
);
2786 EXPORT_SYMBOL(neigh_app_ns
);
2788 #ifdef CONFIG_SYSCTL
2789 EXPORT_SYMBOL(neigh_sysctl_register
);
2790 EXPORT_SYMBOL(neigh_sysctl_unregister
);