// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

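/*
 * Look up a node by callsign. On success a reference is taken on the
 * node with nr_node_hold(); the caller must drop it with nr_node_put().
 * Returns NULL if the callsign is not in the node list.
 */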
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

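/*
 * Look up a neighbour by callsign and device. On success a reference is
 * taken with nr_neigh_hold(); the caller must drop it with nr_neigh_put().
 * Returns NULL if no such neighbour exists.
 */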
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}

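/*
 * Each nr_node keeps up to three routes in routes[0..2], best quality
 * first; "count" is how many entries are valid and "which" is the index
 * of the route currently in use (advanced past failed neighbours).
 */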
/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if it is new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev=nr_dev_get(nr)) != NULL) {	/* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

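	/* No entry yet for this neighbour on this device: create one. */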
	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_ATOMIC);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

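	/*
	 * When the broadcast is about the neighbour itself, let it update
	 * the path quality, unless the operator has locked the neighbour.
	 */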
	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strscpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
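	/* The node already exists: update its route table under the node lock. */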
	nr_node_lock(nr_node);

	if (quality != 0)
		strscpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

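/*
 * Unlink a node from the node list and drop the list's reference.
 * The caller must hold nr_node_list_lock.
 */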
static void nr_remove_node_locked(struct nr_node *nr_node)
{
	lockdep_assert_held(&nr_node_list_lock);

	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

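/*
 * Unlink a neighbour from the neighbour list and drop the list's
 * reference. The caller must hold nr_neigh_list_lock.
 */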
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

313 * "Delete" a node. Strictly speaking remove a route to a node. The node
314 * is only deleted if no routes are left to it.
316 static int nr_del_node(ax25_address
*callsign
, ax25_address
*neighbour
, struct net_device
*dev
)
318 struct nr_node
*nr_node
;
319 struct nr_neigh
*nr_neigh
;
322 nr_node
= nr_node_get(callsign
);
327 nr_neigh
= nr_neigh_get_dev(neighbour
, dev
);
329 if (nr_neigh
== NULL
) {
330 nr_node_put(nr_node
);
334 spin_lock_bh(&nr_node_list_lock
);
335 nr_node_lock(nr_node
);
336 for (i
= 0; i
< nr_node
->count
; i
++) {
337 if (nr_node
->routes
[i
].neighbour
== nr_neigh
) {
339 nr_neigh_put(nr_neigh
);
341 if (nr_neigh
->count
== 0 && !nr_neigh
->locked
)
342 nr_remove_neigh(nr_neigh
);
343 nr_neigh_put(nr_neigh
);
347 if (nr_node
->count
== 0) {
348 nr_remove_node_locked(nr_node
);
352 nr_node
->routes
[0] = nr_node
->routes
[1];
355 nr_node
->routes
[1] = nr_node
->routes
[2];
360 nr_node_put(nr_node
);
362 nr_node_unlock(nr_node
);
363 spin_unlock_bh(&nr_node_list_lock
);
368 nr_neigh_put(nr_neigh
);
369 nr_node_unlock(nr_node
);
370 spin_unlock_bh(&nr_node_list_lock
);
371 nr_node_put(nr_node
);
/*
 *	Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

424 * "Delete" a neighbour. The neighbour is only removed if the number
425 * of nodes that may use it is zero.
427 static int nr_del_neigh(ax25_address
*callsign
, struct net_device
*dev
, unsigned int quality
)
429 struct nr_neigh
*nr_neigh
;
431 nr_neigh
= nr_neigh_get_dev(callsign
, dev
);
433 if (nr_neigh
== NULL
) return -EINVAL
;
435 nr_neigh
->quality
= quality
;
436 nr_neigh
->locked
= 0;
438 if (nr_neigh
->count
== 0)
439 nr_remove_neigh(nr_neigh
);
440 nr_neigh_put(nr_neigh
);
/*
 *	Decrement the obsolescence count by one. If a route is reduced to a
 *	count of zero, remove it. Also remove any unlocked neighbours with
 *	zero nodes routing via it.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 *	A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 *	Check that the device given is a valid AX.25 interface that is "up".
 *	Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 *	Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 *	Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

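/*
 * Build an ax25_digi from the flat digipeater list supplied through the
 * routing ioctl. Returns NULL when there are no digipeaters.
 */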
static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i]    = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi      = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

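/*
 * Illustrative sketch only (not part of this file's build): user space
 * reaches these routing ioctls through an AF_NETROM socket, typically
 * with CAP_NET_ADMIN. The port name, quality and obsolescence values
 * below are made-up examples; "ax0" stands for the AX.25 interface the
 * neighbour is heard on.
 *
 *	struct nr_route_struct nr_route;
 *	int fd = socket(AF_NETROM, SOCK_SEQPACKET, 0);
 *
 *	memset(&nr_route, 0, sizeof(nr_route));
 *	nr_route.type      = NETROM_NODE;
 *	strcpy(nr_route.device, "ax0");
 *	strcpy(nr_route.mnemonic, "#NODE");
 *	nr_route.quality   = 192;
 *	nr_route.obs_count = 6;
 *	nr_route.ndigis    = 0;
 *	... fill nr_route.callsign and nr_route.neighbour (ax25_address) ...
 *	ioctl(fd, SIOCADDRT, &nr_route);
 *
 * SIOCDELRT with the same structure removes a route, and SIOCNRDECOBS
 * triggers nr_dec_obs().
 */
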
/*
 *	Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 *	A level 2 link has timed out, therefore it appears to be a poor link,
 *	then don't use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 *	Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 *	indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* Its for me */
		if (ax25 == NULL)			/* Its from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (const ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

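/*
 * The seq_file code below backs the procfs views of the routing tables
 * (registered by the protocol init code, conventionally as
 * /proc/net/nr_nodes and /proc/net/nr_neigh), one line per node or
 * neighbour.
 */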
#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

*seq
, loff_t
*pos
)
897 __acquires(&nr_neigh_list_lock
)
899 spin_lock_bh(&nr_neigh_list_lock
);
900 return seq_hlist_start_head(&nr_neigh_list
, *pos
);
903 static void *nr_neigh_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
905 return seq_hlist_next(v
, &nr_neigh_list
, pos
);
908 static void nr_neigh_stop(struct seq_file
*seq
, void *v
)
909 __releases(&nr_neigh_list_lock
)
911 spin_unlock_bh(&nr_neigh_list_lock
);
914 static int nr_neigh_show(struct seq_file
*seq
, void *v
)
919 if (v
== SEQ_START_TOKEN
)
920 seq_puts(seq
, "addr callsign dev qual lock count failed digipeaters\n");
922 struct nr_neigh
*nr_neigh
;
924 nr_neigh
= hlist_entry(v
, struct nr_neigh
, neigh_node
);
925 seq_printf(seq
, "%05d %-9s %-4s %3d %d %3d %3d",
927 ax2asc(buf
, &nr_neigh
->callsign
),
928 nr_neigh
->dev
? nr_neigh
->dev
->name
: "???",
934 if (nr_neigh
->digipeat
!= NULL
) {
935 for (i
= 0; i
< nr_neigh
->digipeat
->ndigi
; i
++)
936 seq_printf(seq
, " %s",
937 ax2asc(buf
, &nr_neigh
->digipeat
->calls
[i
]));
945 const struct seq_operations nr_neigh_seqops
= {
946 .start
= nr_neigh_start
,
947 .next
= nr_neigh_next
,
948 .stop
= nr_neigh_stop
,
949 .show
= nr_neigh_show
,
/*
 *	Free all memory associated with the nodes and routes lists.
 */
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}