2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
7 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
9 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
11 #include <linux/errno.h>
12 #include <linux/types.h>
13 #include <linux/socket.h>
15 #include <linux/kernel.h>
16 #include <linux/timer.h>
17 #include <linux/string.h>
18 #include <linux/sockios.h>
19 #include <linux/net.h>
20 #include <linux/slab.h>
22 #include <linux/inet.h>
23 #include <linux/netdevice.h>
25 #include <linux/if_arp.h>
26 #include <linux/skbuff.h>
28 #include <linux/uaccess.h>
29 #include <linux/fcntl.h>
30 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
32 #include <linux/interrupt.h>
33 #include <linux/notifier.h>
34 #include <linux/netfilter.h>
35 #include <linux/init.h>
36 #include <linux/spinlock.h>
37 #include <net/netrom.h>
38 #include <linux/seq_file.h>
39 #include <linux/export.h>
/* Next unique id handed out to a newly created neighbour entry. */
static unsigned int nr_neigh_no = 1;

/* Known NET/ROM destination nodes and the lock protecting the list. */
static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);

/* Directly reachable AX.25 neighbours and the lock protecting the list. */
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
48 static struct nr_node
*nr_node_get(ax25_address
*callsign
)
50 struct nr_node
*found
= NULL
;
51 struct nr_node
*nr_node
;
53 spin_lock_bh(&nr_node_list_lock
);
54 nr_node_for_each(nr_node
, &nr_node_list
)
55 if (ax25cmp(callsign
, &nr_node
->callsign
) == 0) {
56 nr_node_hold(nr_node
);
60 spin_unlock_bh(&nr_node_list_lock
);
64 static struct nr_neigh
*nr_neigh_get_dev(ax25_address
*callsign
,
65 struct net_device
*dev
)
67 struct nr_neigh
*found
= NULL
;
68 struct nr_neigh
*nr_neigh
;
70 spin_lock_bh(&nr_neigh_list_lock
);
71 nr_neigh_for_each(nr_neigh
, &nr_neigh_list
)
72 if (ax25cmp(callsign
, &nr_neigh
->callsign
) == 0 &&
73 nr_neigh
->dev
== dev
) {
74 nr_neigh_hold(nr_neigh
);
78 spin_unlock_bh(&nr_neigh_list_lock
);
/* Forward declaration: needed by nr_add_node() before the definition below. */
static void nr_remove_neigh(struct nr_neigh *);
85 * Add a new route to a node, and in the process add the node and the
86 * neighbour if it is new.
88 static int __must_check
nr_add_node(ax25_address
*nr
, const char *mnemonic
,
89 ax25_address
*ax25
, ax25_digi
*ax25_digi
, struct net_device
*dev
,
90 int quality
, int obs_count
)
92 struct nr_node
*nr_node
;
93 struct nr_neigh
*nr_neigh
;
94 struct nr_route nr_route
;
96 struct net_device
*odev
;
98 if ((odev
=nr_dev_get(nr
)) != NULL
) { /* Can't add routes to ourself */
103 nr_node
= nr_node_get(nr
);
105 nr_neigh
= nr_neigh_get_dev(ax25
, dev
);
108 * The L2 link to a neighbour has failed in the past
109 * and now a frame comes from this neighbour. We assume
110 * it was a temporary trouble with the link and reset the
111 * routes now (and not wait for a node broadcast).
113 if (nr_neigh
!= NULL
&& nr_neigh
->failed
!= 0 && quality
== 0) {
114 struct nr_node
*nr_nodet
;
116 spin_lock_bh(&nr_node_list_lock
);
117 nr_node_for_each(nr_nodet
, &nr_node_list
) {
118 nr_node_lock(nr_nodet
);
119 for (i
= 0; i
< nr_nodet
->count
; i
++)
120 if (nr_nodet
->routes
[i
].neighbour
== nr_neigh
)
121 if (i
< nr_nodet
->which
)
123 nr_node_unlock(nr_nodet
);
125 spin_unlock_bh(&nr_node_list_lock
);
128 if (nr_neigh
!= NULL
)
129 nr_neigh
->failed
= 0;
131 if (quality
== 0 && nr_neigh
!= NULL
&& nr_node
!= NULL
) {
132 nr_neigh_put(nr_neigh
);
133 nr_node_put(nr_node
);
137 if (nr_neigh
== NULL
) {
138 if ((nr_neigh
= kmalloc(sizeof(*nr_neigh
), GFP_ATOMIC
)) == NULL
) {
140 nr_node_put(nr_node
);
144 nr_neigh
->callsign
= *ax25
;
145 nr_neigh
->digipeat
= NULL
;
146 nr_neigh
->ax25
= NULL
;
148 nr_neigh
->quality
= sysctl_netrom_default_path_quality
;
149 nr_neigh
->locked
= 0;
151 nr_neigh
->number
= nr_neigh_no
++;
152 nr_neigh
->failed
= 0;
153 atomic_set(&nr_neigh
->refcount
, 1);
155 if (ax25_digi
!= NULL
&& ax25_digi
->ndigi
> 0) {
156 nr_neigh
->digipeat
= kmemdup(ax25_digi
,
159 if (nr_neigh
->digipeat
== NULL
) {
162 nr_node_put(nr_node
);
167 spin_lock_bh(&nr_neigh_list_lock
);
168 hlist_add_head(&nr_neigh
->neigh_node
, &nr_neigh_list
);
169 nr_neigh_hold(nr_neigh
);
170 spin_unlock_bh(&nr_neigh_list_lock
);
173 if (quality
!= 0 && ax25cmp(nr
, ax25
) == 0 && !nr_neigh
->locked
)
174 nr_neigh
->quality
= quality
;
176 if (nr_node
== NULL
) {
177 if ((nr_node
= kmalloc(sizeof(*nr_node
), GFP_ATOMIC
)) == NULL
) {
179 nr_neigh_put(nr_neigh
);
183 nr_node
->callsign
= *nr
;
184 strcpy(nr_node
->mnemonic
, mnemonic
);
188 atomic_set(&nr_node
->refcount
, 1);
189 spin_lock_init(&nr_node
->node_lock
);
191 nr_node
->routes
[0].quality
= quality
;
192 nr_node
->routes
[0].obs_count
= obs_count
;
193 nr_node
->routes
[0].neighbour
= nr_neigh
;
195 nr_neigh_hold(nr_neigh
);
198 spin_lock_bh(&nr_node_list_lock
);
199 hlist_add_head(&nr_node
->node_node
, &nr_node_list
);
200 /* refcount initialized at 1 */
201 spin_unlock_bh(&nr_node_list_lock
);
205 nr_node_lock(nr_node
);
208 strcpy(nr_node
->mnemonic
, mnemonic
);
210 for (found
= 0, i
= 0; i
< nr_node
->count
; i
++) {
211 if (nr_node
->routes
[i
].neighbour
== nr_neigh
) {
212 nr_node
->routes
[i
].quality
= quality
;
213 nr_node
->routes
[i
].obs_count
= obs_count
;
220 /* We have space at the bottom, slot it in */
221 if (nr_node
->count
< 3) {
222 nr_node
->routes
[2] = nr_node
->routes
[1];
223 nr_node
->routes
[1] = nr_node
->routes
[0];
225 nr_node
->routes
[0].quality
= quality
;
226 nr_node
->routes
[0].obs_count
= obs_count
;
227 nr_node
->routes
[0].neighbour
= nr_neigh
;
231 nr_neigh_hold(nr_neigh
);
234 /* It must be better than the worst */
235 if (quality
> nr_node
->routes
[2].quality
) {
236 nr_node
->routes
[2].neighbour
->count
--;
237 nr_neigh_put(nr_node
->routes
[2].neighbour
);
239 if (nr_node
->routes
[2].neighbour
->count
== 0 && !nr_node
->routes
[2].neighbour
->locked
)
240 nr_remove_neigh(nr_node
->routes
[2].neighbour
);
242 nr_node
->routes
[2].quality
= quality
;
243 nr_node
->routes
[2].obs_count
= obs_count
;
244 nr_node
->routes
[2].neighbour
= nr_neigh
;
246 nr_neigh_hold(nr_neigh
);
252 /* Now re-sort the routes in quality order */
253 switch (nr_node
->count
) {
255 if (nr_node
->routes
[1].quality
> nr_node
->routes
[0].quality
) {
256 switch (nr_node
->which
) {
264 nr_route
= nr_node
->routes
[0];
265 nr_node
->routes
[0] = nr_node
->routes
[1];
266 nr_node
->routes
[1] = nr_route
;
268 if (nr_node
->routes
[2].quality
> nr_node
->routes
[1].quality
) {
269 switch (nr_node
->which
) {
270 case 1: nr_node
->which
= 2;
273 case 2: nr_node
->which
= 1;
279 nr_route
= nr_node
->routes
[1];
280 nr_node
->routes
[1] = nr_node
->routes
[2];
281 nr_node
->routes
[2] = nr_route
;
284 if (nr_node
->routes
[1].quality
> nr_node
->routes
[0].quality
) {
285 switch (nr_node
->which
) {
286 case 0: nr_node
->which
= 1;
289 case 1: nr_node
->which
= 0;
294 nr_route
= nr_node
->routes
[0];
295 nr_node
->routes
[0] = nr_node
->routes
[1];
296 nr_node
->routes
[1] = nr_route
;
302 for (i
= 0; i
< nr_node
->count
; i
++) {
303 if (nr_node
->routes
[i
].neighbour
== nr_neigh
) {
304 if (i
< nr_node
->which
)
310 nr_neigh_put(nr_neigh
);
311 nr_node_unlock(nr_node
);
312 nr_node_put(nr_node
);
316 static inline void __nr_remove_node(struct nr_node
*nr_node
)
318 hlist_del_init(&nr_node
->node_node
);
319 nr_node_put(nr_node
);
/* Remove a node while nr_node_list_lock is already held. */
#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)
325 static void nr_remove_node(struct nr_node
*nr_node
)
327 spin_lock_bh(&nr_node_list_lock
);
328 __nr_remove_node(nr_node
);
329 spin_unlock_bh(&nr_node_list_lock
);
332 static inline void __nr_remove_neigh(struct nr_neigh
*nr_neigh
)
334 hlist_del_init(&nr_neigh
->neigh_node
);
335 nr_neigh_put(nr_neigh
);
/* Remove a neighbour while nr_neigh_list_lock is already held. */
#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)
341 static void nr_remove_neigh(struct nr_neigh
*nr_neigh
)
343 spin_lock_bh(&nr_neigh_list_lock
);
344 __nr_remove_neigh(nr_neigh
);
345 spin_unlock_bh(&nr_neigh_list_lock
);
349 * "Delete" a node. Strictly speaking remove a route to a node. The node
350 * is only deleted if no routes are left to it.
352 static int nr_del_node(ax25_address
*callsign
, ax25_address
*neighbour
, struct net_device
*dev
)
354 struct nr_node
*nr_node
;
355 struct nr_neigh
*nr_neigh
;
358 nr_node
= nr_node_get(callsign
);
363 nr_neigh
= nr_neigh_get_dev(neighbour
, dev
);
365 if (nr_neigh
== NULL
) {
366 nr_node_put(nr_node
);
370 nr_node_lock(nr_node
);
371 for (i
= 0; i
< nr_node
->count
; i
++) {
372 if (nr_node
->routes
[i
].neighbour
== nr_neigh
) {
374 nr_neigh_put(nr_neigh
);
376 if (nr_neigh
->count
== 0 && !nr_neigh
->locked
)
377 nr_remove_neigh(nr_neigh
);
378 nr_neigh_put(nr_neigh
);
382 if (nr_node
->count
== 0) {
383 nr_remove_node(nr_node
);
387 nr_node
->routes
[0] = nr_node
->routes
[1];
389 nr_node
->routes
[1] = nr_node
->routes
[2];
393 nr_node_put(nr_node
);
395 nr_node_unlock(nr_node
);
400 nr_neigh_put(nr_neigh
);
401 nr_node_unlock(nr_node
);
402 nr_node_put(nr_node
);
408 * Lock a neighbour with a quality.
410 static int __must_check
nr_add_neigh(ax25_address
*callsign
,
411 ax25_digi
*ax25_digi
, struct net_device
*dev
, unsigned int quality
)
413 struct nr_neigh
*nr_neigh
;
415 nr_neigh
= nr_neigh_get_dev(callsign
, dev
);
417 nr_neigh
->quality
= quality
;
418 nr_neigh
->locked
= 1;
419 nr_neigh_put(nr_neigh
);
423 if ((nr_neigh
= kmalloc(sizeof(*nr_neigh
), GFP_ATOMIC
)) == NULL
)
426 nr_neigh
->callsign
= *callsign
;
427 nr_neigh
->digipeat
= NULL
;
428 nr_neigh
->ax25
= NULL
;
430 nr_neigh
->quality
= quality
;
431 nr_neigh
->locked
= 1;
433 nr_neigh
->number
= nr_neigh_no
++;
434 nr_neigh
->failed
= 0;
435 atomic_set(&nr_neigh
->refcount
, 1);
437 if (ax25_digi
!= NULL
&& ax25_digi
->ndigi
> 0) {
438 nr_neigh
->digipeat
= kmemdup(ax25_digi
, sizeof(*ax25_digi
),
440 if (nr_neigh
->digipeat
== NULL
) {
446 spin_lock_bh(&nr_neigh_list_lock
);
447 hlist_add_head(&nr_neigh
->neigh_node
, &nr_neigh_list
);
448 /* refcount is initialized at 1 */
449 spin_unlock_bh(&nr_neigh_list_lock
);
455 * "Delete" a neighbour. The neighbour is only removed if the number
456 * of nodes that may use it is zero.
458 static int nr_del_neigh(ax25_address
*callsign
, struct net_device
*dev
, unsigned int quality
)
460 struct nr_neigh
*nr_neigh
;
462 nr_neigh
= nr_neigh_get_dev(callsign
, dev
);
464 if (nr_neigh
== NULL
) return -EINVAL
;
466 nr_neigh
->quality
= quality
;
467 nr_neigh
->locked
= 0;
469 if (nr_neigh
->count
== 0)
470 nr_remove_neigh(nr_neigh
);
471 nr_neigh_put(nr_neigh
);
477 * Decrement the obsolescence count by one. If a route is reduced to a
478 * count of zero, remove it. Also remove any unlocked neighbours with
479 * zero nodes routing via it.
481 static int nr_dec_obs(void)
483 struct nr_neigh
*nr_neigh
;
485 struct hlist_node
*nodet
;
488 spin_lock_bh(&nr_node_list_lock
);
489 nr_node_for_each_safe(s
, nodet
, &nr_node_list
) {
491 for (i
= 0; i
< s
->count
; i
++) {
492 switch (s
->routes
[i
].obs_count
) {
493 case 0: /* A locked entry */
496 case 1: /* From 1 -> 0 */
497 nr_neigh
= s
->routes
[i
].neighbour
;
500 nr_neigh_put(nr_neigh
);
502 if (nr_neigh
->count
== 0 && !nr_neigh
->locked
)
503 nr_remove_neigh(nr_neigh
);
509 s
->routes
[0] = s
->routes
[1];
512 s
->routes
[1] = s
->routes
[2];
519 s
->routes
[i
].obs_count
--;
526 nr_remove_node_locked(s
);
529 spin_unlock_bh(&nr_node_list_lock
);
535 * A device has been removed. Remove its routes and neighbours.
537 void nr_rt_device_down(struct net_device
*dev
)
540 struct hlist_node
*nodet
, *node2t
;
544 spin_lock_bh(&nr_neigh_list_lock
);
545 nr_neigh_for_each_safe(s
, nodet
, &nr_neigh_list
) {
547 spin_lock_bh(&nr_node_list_lock
);
548 nr_node_for_each_safe(t
, node2t
, &nr_node_list
) {
550 for (i
= 0; i
< t
->count
; i
++) {
551 if (t
->routes
[i
].neighbour
== s
) {
556 t
->routes
[0] = t
->routes
[1];
558 t
->routes
[1] = t
->routes
[2];
566 nr_remove_node_locked(t
);
569 spin_unlock_bh(&nr_node_list_lock
);
571 nr_remove_neigh_locked(s
);
574 spin_unlock_bh(&nr_neigh_list_lock
);
578 * Check that the device given is a valid AX.25 interface that is "up".
579 * Or a valid ethernet interface with an AX.25 callsign binding.
581 static struct net_device
*nr_ax25_dev_get(char *devname
)
583 struct net_device
*dev
;
585 if ((dev
= dev_get_by_name(&init_net
, devname
)) == NULL
)
588 if ((dev
->flags
& IFF_UP
) && dev
->type
== ARPHRD_AX25
)
596 * Find the first active NET/ROM device, usually "nr0".
598 struct net_device
*nr_dev_first(void)
600 struct net_device
*dev
, *first
= NULL
;
603 for_each_netdev_rcu(&init_net
, dev
) {
604 if ((dev
->flags
& IFF_UP
) && dev
->type
== ARPHRD_NETROM
)
605 if (first
== NULL
|| strncmp(dev
->name
, first
->name
, 3) < 0)
616 * Find the NET/ROM device for the given callsign.
618 struct net_device
*nr_dev_get(ax25_address
*addr
)
620 struct net_device
*dev
;
623 for_each_netdev_rcu(&init_net
, dev
) {
624 if ((dev
->flags
& IFF_UP
) && dev
->type
== ARPHRD_NETROM
&&
625 ax25cmp(addr
, (ax25_address
*)dev
->dev_addr
) == 0) {
636 static ax25_digi
*nr_call_to_digi(ax25_digi
*digi
, int ndigis
,
637 ax25_address
*digipeaters
)
644 for (i
= 0; i
< ndigis
; i
++) {
645 digi
->calls
[i
] = digipeaters
[i
];
646 digi
->repeated
[i
] = 0;
649 digi
->ndigi
= ndigis
;
650 digi
->lastrepeat
= -1;
656 * Handle the ioctls that control the routing functions.
658 int nr_rt_ioctl(unsigned int cmd
, void __user
*arg
)
660 struct nr_route_struct nr_route
;
661 struct net_device
*dev
;
667 if (copy_from_user(&nr_route
, arg
, sizeof(struct nr_route_struct
)))
669 if (nr_route
.ndigis
> AX25_MAX_DIGIS
)
671 if ((dev
= nr_ax25_dev_get(nr_route
.device
)) == NULL
)
673 switch (nr_route
.type
) {
675 if (strnlen(nr_route
.mnemonic
, 7) == 7) {
680 ret
= nr_add_node(&nr_route
.callsign
,
683 nr_call_to_digi(&digi
, nr_route
.ndigis
,
684 nr_route
.digipeaters
),
685 dev
, nr_route
.quality
,
689 ret
= nr_add_neigh(&nr_route
.callsign
,
690 nr_call_to_digi(&digi
, nr_route
.ndigis
,
691 nr_route
.digipeaters
),
692 dev
, nr_route
.quality
);
701 if (copy_from_user(&nr_route
, arg
, sizeof(struct nr_route_struct
)))
703 if ((dev
= nr_ax25_dev_get(nr_route
.device
)) == NULL
)
705 switch (nr_route
.type
) {
707 ret
= nr_del_node(&nr_route
.callsign
,
708 &nr_route
.neighbour
, dev
);
711 ret
= nr_del_neigh(&nr_route
.callsign
,
712 dev
, nr_route
.quality
);
731 * A level 2 link has timed out, therefore it appears to be a poor link,
732 * then don't use that neighbour until it is reset.
734 void nr_link_failed(ax25_cb
*ax25
, int reason
)
736 struct nr_neigh
*s
, *nr_neigh
= NULL
;
737 struct nr_node
*nr_node
= NULL
;
739 spin_lock_bh(&nr_neigh_list_lock
);
740 nr_neigh_for_each(s
, &nr_neigh_list
) {
741 if (s
->ax25
== ax25
) {
747 spin_unlock_bh(&nr_neigh_list_lock
);
749 if (nr_neigh
== NULL
)
752 nr_neigh
->ax25
= NULL
;
755 if (++nr_neigh
->failed
< sysctl_netrom_link_fails_count
) {
756 nr_neigh_put(nr_neigh
);
759 spin_lock_bh(&nr_node_list_lock
);
760 nr_node_for_each(nr_node
, &nr_node_list
) {
761 nr_node_lock(nr_node
);
762 if (nr_node
->which
< nr_node
->count
&&
763 nr_node
->routes
[nr_node
->which
].neighbour
== nr_neigh
)
765 nr_node_unlock(nr_node
);
767 spin_unlock_bh(&nr_node_list_lock
);
768 nr_neigh_put(nr_neigh
);
772 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
773 * indicates an internally generated frame.
775 int nr_route_frame(struct sk_buff
*skb
, ax25_cb
*ax25
)
777 ax25_address
*nr_src
, *nr_dest
;
778 struct nr_neigh
*nr_neigh
;
779 struct nr_node
*nr_node
;
780 struct net_device
*dev
;
784 struct sk_buff
*skbn
;
787 nr_src
= (ax25_address
*)(skb
->data
+ 0);
788 nr_dest
= (ax25_address
*)(skb
->data
+ 7);
791 ret
= nr_add_node(nr_src
, "", &ax25
->dest_addr
, ax25
->digipeat
,
792 ax25
->ax25_dev
->dev
, 0,
793 sysctl_netrom_obsolescence_count_initialiser
);
798 if ((dev
= nr_dev_get(nr_dest
)) != NULL
) { /* Its for me */
799 if (ax25
== NULL
) /* Its from me */
800 ret
= nr_loopback_queue(skb
);
802 ret
= nr_rx_frame(skb
, dev
);
807 if (!sysctl_netrom_routing_control
&& ax25
!= NULL
)
810 /* Its Time-To-Live has expired */
811 if (skb
->data
[14] == 1) {
815 nr_node
= nr_node_get(nr_dest
);
818 nr_node_lock(nr_node
);
820 if (nr_node
->which
>= nr_node
->count
) {
821 nr_node_unlock(nr_node
);
822 nr_node_put(nr_node
);
826 nr_neigh
= nr_node
->routes
[nr_node
->which
].neighbour
;
828 if ((dev
= nr_dev_first()) == NULL
) {
829 nr_node_unlock(nr_node
);
830 nr_node_put(nr_node
);
834 /* We are going to change the netrom headers so we should get our
835 own skb, we also did not know until now how much header space
836 we had to reserve... - RXQ */
837 if ((skbn
=skb_copy_expand(skb
, dev
->hard_header_len
, 0, GFP_ATOMIC
)) == NULL
) {
838 nr_node_unlock(nr_node
);
839 nr_node_put(nr_node
);
847 dptr
= skb_push(skb
, 1);
848 *dptr
= AX25_P_NETROM
;
850 ax25s
= nr_neigh
->ax25
;
851 nr_neigh
->ax25
= ax25_send_frame(skb
, 256,
852 (ax25_address
*)dev
->dev_addr
,
854 nr_neigh
->digipeat
, nr_neigh
->dev
);
859 ret
= (nr_neigh
->ax25
!= NULL
);
860 nr_node_unlock(nr_node
);
861 nr_node_put(nr_node
);
866 #ifdef CONFIG_PROC_FS
868 static void *nr_node_start(struct seq_file
*seq
, loff_t
*pos
)
870 spin_lock_bh(&nr_node_list_lock
);
871 return seq_hlist_start_head(&nr_node_list
, *pos
);
874 static void *nr_node_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
876 return seq_hlist_next(v
, &nr_node_list
, pos
);
879 static void nr_node_stop(struct seq_file
*seq
, void *v
)
881 spin_unlock_bh(&nr_node_list_lock
);
884 static int nr_node_show(struct seq_file
*seq
, void *v
)
889 if (v
== SEQ_START_TOKEN
)
891 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
893 struct nr_node
*nr_node
= hlist_entry(v
, struct nr_node
,
896 nr_node_lock(nr_node
);
897 seq_printf(seq
, "%-9s %-7s %d %d",
898 ax2asc(buf
, &nr_node
->callsign
),
899 (nr_node
->mnemonic
[0] == '\0') ? "*" : nr_node
->mnemonic
,
903 for (i
= 0; i
< nr_node
->count
; i
++) {
904 seq_printf(seq
, " %3d %d %05d",
905 nr_node
->routes
[i
].quality
,
906 nr_node
->routes
[i
].obs_count
,
907 nr_node
->routes
[i
].neighbour
->number
);
909 nr_node_unlock(nr_node
);
916 static const struct seq_operations nr_node_seqops
= {
917 .start
= nr_node_start
,
918 .next
= nr_node_next
,
919 .stop
= nr_node_stop
,
920 .show
= nr_node_show
,
923 static int nr_node_info_open(struct inode
*inode
, struct file
*file
)
925 return seq_open(file
, &nr_node_seqops
);
928 const struct file_operations nr_nodes_fops
= {
929 .owner
= THIS_MODULE
,
930 .open
= nr_node_info_open
,
933 .release
= seq_release
,
936 static void *nr_neigh_start(struct seq_file
*seq
, loff_t
*pos
)
938 spin_lock_bh(&nr_neigh_list_lock
);
939 return seq_hlist_start_head(&nr_neigh_list
, *pos
);
942 static void *nr_neigh_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
944 return seq_hlist_next(v
, &nr_neigh_list
, pos
);
947 static void nr_neigh_stop(struct seq_file
*seq
, void *v
)
949 spin_unlock_bh(&nr_neigh_list_lock
);
952 static int nr_neigh_show(struct seq_file
*seq
, void *v
)
957 if (v
== SEQ_START_TOKEN
)
958 seq_puts(seq
, "addr callsign dev qual lock count failed digipeaters\n");
960 struct nr_neigh
*nr_neigh
;
962 nr_neigh
= hlist_entry(v
, struct nr_neigh
, neigh_node
);
963 seq_printf(seq
, "%05d %-9s %-4s %3d %d %3d %3d",
965 ax2asc(buf
, &nr_neigh
->callsign
),
966 nr_neigh
->dev
? nr_neigh
->dev
->name
: "???",
972 if (nr_neigh
->digipeat
!= NULL
) {
973 for (i
= 0; i
< nr_neigh
->digipeat
->ndigi
; i
++)
974 seq_printf(seq
, " %s",
975 ax2asc(buf
, &nr_neigh
->digipeat
->calls
[i
]));
983 static const struct seq_operations nr_neigh_seqops
= {
984 .start
= nr_neigh_start
,
985 .next
= nr_neigh_next
,
986 .stop
= nr_neigh_stop
,
987 .show
= nr_neigh_show
,
990 static int nr_neigh_info_open(struct inode
*inode
, struct file
*file
)
992 return seq_open(file
, &nr_neigh_seqops
);
995 const struct file_operations nr_neigh_fops
= {
996 .owner
= THIS_MODULE
,
997 .open
= nr_neigh_info_open
,
1000 .release
= seq_release
,
1006 * Free all memory associated with the nodes and routes lists.
1008 void __exit
nr_rt_free(void)
1010 struct nr_neigh
*s
= NULL
;
1011 struct nr_node
*t
= NULL
;
1012 struct hlist_node
*nodet
;
1014 spin_lock_bh(&nr_neigh_list_lock
);
1015 spin_lock_bh(&nr_node_list_lock
);
1016 nr_node_for_each_safe(t
, nodet
, &nr_node_list
) {
1018 nr_remove_node_locked(t
);
1021 nr_neigh_for_each_safe(s
, nodet
, &nr_neigh_list
) {
1026 nr_remove_neigh_locked(s
);
1028 spin_unlock_bh(&nr_node_list_lock
);
1029 spin_unlock_bh(&nr_neigh_list_lock
);