 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>
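/*
 *	The routing state lives on two hash lists: known NET/ROM nodes and
 *	directly reachable neighbours. Each list is protected by its own
 *	spinlock, and individual entries are reference counted via
 *	nr_node_hold/put and nr_neigh_hold/put.
 */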
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}
static void nr_remove_neigh(struct nr_neigh *);
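/*
 *	Each nr_node carries up to three routes in routes[], kept sorted in
 *	descending quality order; nr_node->which indexes the route currently
 *	in use and nr_node->count is the number of valid entries.
 */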
/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}
/*
 *	Add a new route to a node, and in the process add the node and the
 *	neighbour if it is new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = sysctl_netrom_default_path_quality;
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_ATOMIC);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
		break;
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}
static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}
/*
 *	"Delete" a node. Strictly speaking remove a route to a node. The node
 *	is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					fallthrough;
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					break;
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}
/*
 *	Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_ATOMIC);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}
/*
 *	"Delete" a neighbour. The neighbour is only removed if the number
 *	of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL) return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}
/*
 *	Decrement the obsolescence count by one. If a route is reduced to a
 *	count of zero, remove it. Also remove any unlocked neighbours with
 *	zero nodes routing via it.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
					break;
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}
/*
 *	A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node  *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
							break;
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}
/*
 *	Check that the device given is a valid AX.25 interface that is "up".
 *	Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}
/*
 *	Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	rcu_read_unlock();

	return first;
}
/*
 *	Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}
static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i]    = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi      = ndigis;
	digi->lastrepeat = -1;

	return digi;
}
/*
 *	Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 *	A level 2 link has timed out, so it appears to be a poor link;
 *	don't use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}
/*
 *	Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 *	indicates an internally generated frame.
 */
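/*
 *	NET/ROM network header layout as used below: source callsign at
 *	offset 0, destination callsign at offset 7, TTL at offset 14.
 */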
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}
#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s  %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, "  %3d   %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

const struct file_operations nr_nodes_fops = {
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr  callsign  dev  qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s  %3d    %d   %3d    %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

const struct file_operations nr_neigh_fops = {
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif
/*
 *	Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}