/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 changes.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */
/******************************************************************************
    (c) 1995-1998 E.M. Serrat		emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
struct dn_rt_hash_bucket {
	struct dn_route __rcu *chain;
	spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;
static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(struct timer_list *unused);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush);
int decnet_dst_gc_interval = 2;
static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.default_advmss =	dn_dst_default_advmss,
	.mtu =			dn_dst_mtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		dn_dst_destroy,
	.ifdown =		dn_dst_ifdown,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.redirect =		dn_dst_redirect,
	.neigh_lookup =		dn_dst_neigh_lookup,
};
static void dn_dst_destroy(struct dst_entry *dst)
{
	struct dn_route *rt = (struct dn_route *) dst;

	if (rt->n)
		neigh_release(rt->n);
	dst_destroy_metrics_generic(dst);
}
static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
{
	struct dn_route *rt = (struct dn_route *) dst;
	struct neighbour *n = rt->n;

	if (n && n->dev == dev) {
		n->dev = dev_net(dev)->loopback_dev;
		dev_hold(n->dev);
		dev_put(dev);
	}
}
static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
	__u16 tmp = (__u16 __force)(src ^ dst);
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	return dn_rt_hash_mask & (unsigned int)tmp;
}
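
/*
 * Worked example (illustrative only, not part of the original code):
 * two nodes in different areas, src = 0x0401 and dst = 0x0801, give
 * tmp = 0x0c00. Without the shift folding, a 256- or 1024-bucket table
 * would put every such pair in bucket 0, since the differing area bits
 * sit above the mask; folding gives 0x0c00 ^ 0x0180 = 0x0d80, then
 * 0x0d80 ^ 0x006c = 0x0dec, so the area bits end up influencing the
 * low bits that dn_rt_hash_mask keeps.
 */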
static void dn_dst_check_expire(struct timer_list *unused)
{
	int i;
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) > 1 ||
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dn_next;
				continue;
			}
			*rtp = rt->dn_next;
			rt->dn_next = NULL;
			dst_dev_put(&rt->dst);
			dst_release(&rt->dst);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}
static int dn_dst_gc(struct dst_ops *ops)
{
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) > 1 ||
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dn_next;
				continue;
			}
			*rtp = rt->dn_next;
			rt->dn_next = NULL;
			dst_dev_put(&rt->dst);
			dst_release(&rt->dst);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}
/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
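/*
 * For example: the long routing header costs 21 bytes, so the smallest
 * mtu accepted in that case is 230 - 21 = 209; with the 6 byte short
 * header the floor is 230 - 6 = 224. The advertised mss is then the
 * accepted mtu minus DN_MAX_NSP_DATA_HEADER.
 */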
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
	struct dn_route *rt = (struct dn_route *) dst;
	struct neighbour *n = rt->n;
	u32 min_mtu = 230;
	struct dn_dev *dn;

	dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst_metric_set(dst, RTAX_MTU, mtu);
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
			if (!existing_mss || existing_mss > mss)
				dst_metric_set(dst, RTAX_ADVMSS, mss);
		}
	}
}
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
}

/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}
static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
{
	return ((fl1->daddr ^ fl2->daddr) |
		(fl1->saddr ^ fl2->saddr) |
		(fl1->flowidn_mark ^ fl2->flowidn_mark) |
		(fl1->flowidn_scope ^ fl2->flowidn_scope) |
		(fl1->flowidn_oif ^ fl2->flowidn_oif) |
		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
}
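
/*
 * The XOR/OR form above is branch-free; it is equivalent to the more
 * obvious field-by-field test (sketch):
 *
 *	fl1->daddr == fl2->daddr && fl1->saddr == fl2->saddr && ...
 *
 * since each XOR term is zero exactly when the two fields are equal,
 * and the OR of the terms is zero only when all of them are.
 */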
static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
{
	struct dn_route *rth;
	struct dn_route __rcu **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
		if (compare_keys(&rth->fld, &rt->fld)) {
			/* Put it first */
			*rthp = rth->dn_next;
			rcu_assign_pointer(rth->dn_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			dst_hold_and_use(&rth->dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dst_release_immediate(&rt->dst);
			*rp = rth;
			return 0;
		}
		rthp = &rth->dn_next;
	}

	rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_hold_and_use(&rt->dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}
static void dn_run_flush(struct timer_list *unused)
{
	int i;
	struct dn_route *rt, *next;

	for (i = 0; i < dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for (; rt; rt = next) {
			next = rcu_dereference_raw(rt->dn_next);
			RCU_INIT_POINTER(rt->dn_next, NULL);
			dst_dev_put(&rt->dst);
			dst_release(&rt->dst);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}
static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_rt_deadline = 0;
		dn_run_flush(NULL);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}
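
/*
 * Illustrative behaviour (derived from the logic above, not extra API):
 * dn_rt_cache_flush(0) flushes synchronously via dn_run_flush(), a
 * negative delay asks for the default dn_rt_min_delay, and a positive
 * delay is clamped against any already-pending deadline. From process
 * context, a pending deadline closer than
 * (dn_rt_max_delay - dn_rt_min_delay) also forces an immediate flush.
 */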
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;

	/* Add back headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	swap(*src, *dst);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}
/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}
/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	cb = DN_SKB_CB(skb);
	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";

		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len,
			le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch (cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}
static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb_reset_transport_header(skb);

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;

	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
		       &init_net, NULL, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb_reset_transport_header(skb);

	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
		       &init_net, NULL, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
	struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))
		goto dump_it;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb_reset_network_header(skb);

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       &init_net, NULL, skb, skb->dev, NULL,
				       dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
				       &init_net, NULL, skb, skb->dev, NULL,
				       dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       &init_net, NULL, skb, skb->dev, NULL,
				       dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       &init_net, NULL, skb, skb->dev, NULL,
				       dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch (flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
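
/*
 * Wire format summary (derived from the handling above, kept for
 * reference): every frame starts with a 16 bit little-endian length,
 * then a flags octet, optionally preceded by pad octets signalled by
 * DN_RT_F_PF. Control packets are dispatched by DN_RT_CNTL_MSK; data
 * packets by DN_RT_PKT_MSK to the 20 byte long or 5 byte short header
 * parsers above.
 */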
static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	int err = -EINVAL;

	if (rt->n == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT,
		       &init_net, sk, skb, NULL, dev,
		       dn_to_neigh_output);

error:
	net_dbg_ratelimited("dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}
static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb_dst(skb);
	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
	struct dn_route *rt;
	int header_len;
	struct net_device *dev = skb->dev;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb_dst(skb);
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD,
		       &init_net, NULL, skb, dev, skb->dev,
		       dn_to_neigh_output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
			    le16_to_cpu(cb->src), le16_to_cpu(cb->dst));

	kfree_skb(skb);

	return NET_RX_DROP;
}

static int dn_rt_bug(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
			    le16_to_cpu(cb->src), le16_to_cpu(cb->dst));

	kfree_skb(skb);

	return NET_RX_DROP;
}
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
{
	return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}

static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
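
/*
 * Note: "mtu ? : dst->dev->mtu" is the GNU C shorthand for
 * "mtu ? mtu : dst->dev->mtu"; the cached RTAX_MTU metric wins when it
 * is non-zero, otherwise the device mtu is reported.
 */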
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->dst.dev;
	unsigned int mss_metric;
	struct neighbour *n;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->n == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->n = n;
	}

	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
	if (mss_metric) {
		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
		if (mss_metric > mss)
			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
	}
	return 0;
}
static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
	__u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
	int match = 16;
	while (tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}
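
/*
 * Worked example (illustrative only): addresses 0x0401 and 0x0403
 * differ in bit 1, so tmp = 0x0002 takes two shifts to clear and the
 * function reports 16 - 2 = 14 leading bits in common; identical
 * addresses score the full 16. dnet_select_source() below uses this
 * score to prefer the most specific local address.
 */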
static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr,
				 int scope)
{
	__le16 saddr = 0;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	rcu_read_lock();
	dn_db = rcu_dereference(dev->dn_ptr);
	for (ifa = rcu_dereference(dn_db->ifa_list);
	     ifa != NULL;
	     ifa = rcu_dereference(ifa->ifa_next)) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match)
			saddr = ifa->ifa_local;
		if (best_match == 0)
			saddr = ifa->ifa_local;
	}
	rcu_read_unlock();

	return saddr;
}
static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}
static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
	__le16 mask = dnet_make_mask(res->prefixlen);
	return (daddr & ~mask) | res->fi->fib_nh->nh_gw;
}
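
/*
 * Sketch of the rewrite (illustrative only): the bits of daddr outside
 * dnet_make_mask(res->prefixlen) are kept as the host part, and the
 * masked prefix is replaced by the nexthop gateway's, i.e. a
 * prefix-for-prefix rewrite of the destination used by the RTN_NAT
 * handling in dn_route_input_slow().
 */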
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
{
	struct flowidn fld = {
		.daddr = oldflp->daddr,
		.saddr = oldflp->saddr,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = oldflp->flowidn_mark,
		.flowidn_iif = LOOPBACK_IFINDEX,
		.flowidn_oif = oldflp->flowidn_oif,
	};
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL, *dev;
	struct neighbour *neigh = NULL;
	unsigned int hash;
	unsigned int flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__le16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
		       le16_to_cpu(oldflp->saddr),
		       oldflp->flowidn_mark, LOOPBACK_IFINDEX,
		       oldflp->flowidn_oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->flowidn_oif) {
		dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->saddr) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->saddr))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (!dev->dn_ptr)
				continue;
			if (!dn_dev_islocal(dev, oldflp->saddr))
				continue;
			if ((dev->flags & IFF_LOOPBACK) &&
			    oldflp->daddr &&
			    !dn_dev_islocal(dev, oldflp->daddr))
				continue;

			dev_out = dev;
			break;
		}
		rcu_read_unlock();
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fld.daddr) {
		fld.daddr = fld.saddr;

		if (dev_out)
			dev_put(dev_out);
		err = -EINVAL;
		dev_out = init_net.loopback_dev;
		if (!dev_out->dn_ptr)
			goto out;
		err = -EADDRNOTAVAIL;
		dev_hold(dev_out);
		if (!fld.daddr) {
			fld.daddr =
			fld.saddr = dnet_select_source(dev_out, 0,
						       RT_SCOPE_HOST);
			if (!fld.daddr)
				goto out;
		}
		fld.flowidn_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
		       fld.flowidn_oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
			if (neigh) {
				if ((oldflp->flowidn_oif &&
				     (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
				    (oldflp->saddr &&
				     (!dn_dev_islocal(neigh->dev,
						      oldflp->saddr)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fld.daddr)) {
						dev_out = init_net.loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
		if (!dn_db)
			goto e_inval;
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fld.daddr)) {
			dev_put(dev_out);
			dev_out = init_net.loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fld.daddr;
		if (fld.saddr == 0) {
			fld.saddr = dnet_select_source(dev_out, gateway,
						       res.type == RTN_LOCAL ?
						       RT_SCOPE_HOST :
						       RT_SCOPE_LINK);
			if (fld.saddr == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fld.flowidn_oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fld.saddr)
			fld.saddr = fld.daddr;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		if (!dev_out->dn_ptr)
			goto e_inval;
		fld.flowidn_oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
		dn_fib_select_multipath(&fld, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */
	if (!fld.saddr)
		fld.saddr = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fld.flowidn_oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	rt->dn_next = NULL;
	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->fld.saddr        = oldflp->saddr;
	rt->fld.daddr        = oldflp->daddr;
	rt->fld.flowidn_oif  = oldflp->flowidn_oif;
	rt->fld.flowidn_iif  = 0;
	rt->fld.flowidn_mark = oldflp->flowidn_mark;

	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = gateway ? gateway : fld.daddr;
	rt->rt_local_src  = fld.saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	rt->n = neigh;
	neigh = NULL;

	rt->dst.lastuse = jiffies;
	rt->dst.output  = dn_output;
	rt->dst.input   = dn_rt_bug;
	rt->rt_flags      = flags;
	if (flags & RTCF_LOCAL)
		rt->dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	/* dn_insert_route() increments dst->__refcnt */
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_release_immediate(&rt->dst);
	goto e_nobufs;
}
/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
{
	unsigned int hash = dn_hash(flp->saddr, flp->daddr);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
			rt = rcu_dereference_bh(rt->dn_next)) {
			if ((flp->daddr == rt->fld.daddr) &&
			    (flp->saddr == rt->fld.saddr) &&
			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
			    dn_is_output_route(rt) &&
			    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
				dst_hold_and_use(&rt->dst, jiffies);
				rcu_read_unlock_bh();
				*pprt = &rt->dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}
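
/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 * both the cache-hit path and dn_route_output_slow() return with a
 * reference held on *pprt, so the caller releases it when done.
 * "remote" and "local" here are placeholder addresses:
 *
 *	struct dst_entry *dst = NULL;
 *	struct flowidn fld = { .daddr = remote, .saddr = local };
 *
 *	if (__dn_route_output_key(&dst, &fld, 0) == 0) {
 *		... transmit via dst ...
 *		dst_release(dst);
 *	}
 */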
static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->flowidn_proto) {
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(flp), NULL, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}
int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->flowidn_proto) {
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(fl), sk, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}
static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned int hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowidn fld = {
		.daddr = cb->dst,
		.saddr = cb->src,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = skb->mark,
		.flowidn_iif = skb->dev->ifindex,
	};
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fld.saddr == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fld, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us ?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
	} else {
		__le16 src_map = fld.saddr;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = fld.saddr; /* no NAT support for now */

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fld, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fld.daddr;
		}
		fld.saddr = src_map;
	}

	switch (res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
			dn_fib_select_multipath(&fld, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);
		break;
	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fld.saddr = cb->dst;
		fld.daddr = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	rt->dn_next = NULL;
	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = fld.daddr;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	rt->fld.saddr        = cb->src;
	rt->fld.daddr        = cb->dst;
	rt->fld.flowidn_oif  = 0;
	rt->fld.flowidn_iif  = in_dev->ifindex;
	rt->fld.flowidn_mark = fld.flowidn_mark;

	rt->n = neigh;
	neigh = NULL;

	rt->dst.lastuse = jiffies;
	rt->dst.output = dn_rt_bug_out;
	switch (res.type) {
	case RTN_UNICAST:
		rt->dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->dst.output = dn_output;
		rt->dst.input = dn_nsp_rx;
		rt->dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->dst.input = dst_discard;
	}
	rt->rt_flags = flags;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	/* dn_insert_route() increments dst->__refcnt */
	dn_insert_route(rt, hash, &rt);
	skb_dst_set(skb, &rt->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_release_immediate(&rt->dst);
	goto done;
}
static int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned int hash = dn_hash(cb->src, cb->dst);

	if (skb_dst(skb))
		return 0;

	rcu_read_lock();
	for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	     rt = rcu_dereference(rt->dn_next)) {
		if ((rt->fld.saddr == cb->src) &&
		    (rt->fld.daddr == cb->dst) &&
		    (rt->fld.flowidn_oif == 0) &&
		    (rt->fld.flowidn_mark == skb->mark) &&
		    (rt->fld.flowidn_iif == cb->iif)) {
			dst_hold_and_use(&rt->dst, jiffies);
			rcu_read_unlock();
			skb_dst_set(skb, (struct dst_entry *)rt);
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}
static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;

	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
	    nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
		goto errout;

	if (rt->fld.saddr) {
		r->rtm_src_len = 16;
		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
			goto errout;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
		goto errout;

	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
		goto errout;

	if (rt->rt_daddr != rt->rt_gateway &&
	    nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
		goto errout;

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto errout;

	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
			       rt->dst.error) < 0)
		goto errout;

	if (dn_is_input_route(rt) &&
	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U16 },
	[RTA_SRC]		= { .type = NLA_U16 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_GATEWAY]		= { .type = NLA_U16 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_PREFSRC]		= { .type = NLA_U16 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .type = NLA_NESTED },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm = nlmsg_data(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowidn fld;
	struct nlattr *tb[RTA_MAX+1];

	if (!net_eq(net, &init_net))
		return -EINVAL;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy,
			  extack);
	if (err < 0)
		return err;

	memset(&fld, 0, sizeof(fld));
	fld.flowidn_proto = DNPROTO_NSP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	if (tb[RTA_SRC])
		fld.saddr = nla_get_le16(tb[RTA_SRC]);

	if (tb[RTA_DST])
		fld.daddr = nla_get_le16(tb[RTA_DST]);

	if (tb[RTA_IIF])
		fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);

	if (fld.flowidn_iif) {
		struct net_device *dev;
		dev = __dev_get_by_index(&init_net, fld.flowidn_iif);
		if (!dev || !dev->dn_ptr) {
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fld.saddr;
		cb->dst = fld.daddr;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb_dst(skb);
		if (!err && -rt->dst.error)
			err = rt->dst.error;
	} else {
		if (tb[RTA_OIF])
			fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);

		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
	}

	skb->dev = NULL;

	if (err)
		goto out_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);

out_free:
	kfree_skb(skb);
	return err;
}
/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;
	struct rtmsg *rtm;

	if (!net_eq(net, &init_net))
		return 0;

	if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
		return -EINVAL;

	rtm = nlmsg_data(cb->nlh);
	if (!(rtm->rtm_flags & RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for (h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference_bh(rt->dn_next), idx++) {
			if (idx < s_idx)
				continue;
			skb_dst_set(skb, dst_clone(&rt->dst));
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					1, NLM_F_MULTI) < 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rcu_dereference_bh(rt->dn_next);
	while (!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while (*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->dst.dev ? rt->dst.dev->name : "*",
		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
		   atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use, 0);
	return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};
#endif /* CONFIG_PROC_FS */
void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	dst_entries_init(&dn_dst_ops);
	timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = totalram_pages >> (26 - PAGE_SHIFT);

	for (order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while (order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while (dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_create_seq_private("decnet_cache", 0444, init_net.proc_net,
			&dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state), NULL);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
			     dn_cache_getroute, dn_fib_dump, 0);
#else
	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
			     dn_cache_getroute, dn_cache_dump, 0);
#endif
}
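
/*
 * Sizing example (illustrative only): with 4KB pages and 128MB of RAM,
 * goal = 32768 >> 14 = 2, so the allocation starts at order 1 (two
 * pages). The bucket count derived from that is rounded down to a
 * power of two, and dn_rt_hash_mask is finally decremented so it can
 * serve as the bucket mask (count - 1) in dn_hash().
 */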
void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(NULL);

	remove_proc_entry("decnet_cache", init_net.proc_net);
	dst_entries_destroy(&dn_dst_ops);
}