/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets, your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */
/******************************************************************************
    (c) 1995-1998 E.M. Serrat          emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
struct dn_rt_hash_bucket
{
        struct dn_route __rcu *chain;
        spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;
static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
                                             struct sk_buff *skb,
                                             const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;
static struct dst_ops dn_dst_ops = {
        .family =               PF_DECnet,
        .gc_thresh =            128,
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
        .mtu =                  dn_dst_mtu,
        .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              dn_dst_destroy,
        .ifdown =               dn_dst_ifdown,
        .negative_advice =      dn_dst_negative_advice,
        .link_failure =         dn_dst_link_failure,
        .update_pmtu =          dn_dst_update_pmtu,
        .redirect =             dn_dst_redirect,
        .neigh_lookup =         dn_dst_neigh_lookup,
};
static void dn_dst_destroy(struct dst_entry *dst)
{
        struct dn_route *rt = (struct dn_route *) dst;

        if (rt->n)
                neigh_release(rt->n);
        dst_destroy_metrics_generic(dst);
}
static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
{
        if (how) {
                struct dn_route *rt = (struct dn_route *) dst;
                struct neighbour *n = rt->n;

                if (n && n->dev == dev) {
                        n->dev = dev_net(dev)->loopback_dev;
                        dev_hold(n->dev);
                        dev_put(dev);
                }
        }
}
static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
        __u16 tmp = (__u16 __force)(src ^ dst);
        tmp ^= (tmp >> 3);
        tmp ^= (tmp >> 5);
        tmp ^= (tmp >> 10);
        return dn_rt_hash_mask & (unsigned int)tmp;
}
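/*
 * Worked example (illustrative values, not from the original source):
 * src = 0x0401 and dst = 0x0601 give tmp = 0x0201, which the xor-folds
 * above turn into 0x0253 before the mask is applied.  The folds exist
 * so that bits from the whole 16 bit address pair influence the
 * low-order bits that survive dn_rt_hash_mask.
 */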
static inline void dnrt_free(struct dn_route *rt)
{
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
        dst_release(&rt->dst);
        call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static void dn_dst_check_expire(unsigned long dummy)
{
        int i;
        struct dn_route *rt;
        struct dn_route __rcu **rtp;
        unsigned long now = jiffies;
        unsigned long expire = 120 * HZ;

        for (i = 0; i <= dn_rt_hash_mask; i++) {
                rtp = &dn_rt_hash_table[i].chain;

                spin_lock(&dn_rt_hash_table[i].lock);
                while ((rt = rcu_dereference_protected(*rtp,
                                lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                            (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
                                continue;
                        }
                        *rtp = rt->dst.dn_next;
                        rt->dst.dn_next = NULL;
                        dnrt_free(rt);
                }
                spin_unlock(&dn_rt_hash_table[i].lock);

                if ((jiffies - now) > 0)
                        break;
        }

        mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}
static int dn_dst_gc(struct dst_ops *ops)
{
        struct dn_route *rt;
        struct dn_route __rcu **rtp;
        int i;
        unsigned long now = jiffies;
        unsigned long expire = 10 * HZ;

        for (i = 0; i <= dn_rt_hash_mask; i++) {
                spin_lock_bh(&dn_rt_hash_table[i].lock);
                rtp = &dn_rt_hash_table[i].chain;

                while ((rt = rcu_dereference_protected(*rtp,
                                lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                            (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
                                continue;
                        }
                        *rtp = rt->dst.dn_next;
                        rt->dst.dn_next = NULL;
                        dnrt_drop(rt);
                        break;
                }
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
        }

        return 0;
}
/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
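/*
 * Concretely: a long routing header costs 21 bytes and a short one 6,
 * so the minimum mtu accepted below is 230 - 21 = 209 when long headers
 * must be assumed, or 230 - 6 = 224 when the device is known to use
 * short headers.  The advertised mss is then derived from the new mtu
 * by subtracting DN_MAX_NSP_DATA_HEADER.
 */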
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu)
{
        struct dn_route *rt = (struct dn_route *) dst;
        struct neighbour *n = rt->n;
        u32 min_mtu = 230;
        struct dn_dev *dn;

        dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;

        if (dn && dn->use_long == 0)
                min_mtu -= 6;
        else
                min_mtu -= 21;

        if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
                if (!(dst_metric_locked(dst, RTAX_MTU))) {
                        dst_metric_set(dst, RTAX_MTU, mtu);
                        dst_set_expires(dst, dn_rt_mtu_expires);
                }
                if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
                        u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
                        u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
                        if (!existing_mss || existing_mss > mss)
                                dst_metric_set(dst, RTAX_ADVMSS, mss);
                }
        }
}
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb)
{
}

/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
        return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
        dst_release(dst);
        return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}
static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
{
        return ((fl1->daddr ^ fl2->daddr) |
                (fl1->saddr ^ fl2->saddr) |
                (fl1->flowidn_mark ^ fl2->flowidn_mark) |
                (fl1->flowidn_scope ^ fl2->flowidn_scope) |
                (fl1->flowidn_oif ^ fl2->flowidn_oif) |
                (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
}
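/*
 * Note: the OR of XORs above is zero if and only if every field pair
 * matches, so all six flow keys are compared without a conditional
 * branch per field.
 */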
static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
{
        struct dn_route *rth;
        struct dn_route __rcu **rthp;
        unsigned long now = jiffies;

        rthp = &dn_rt_hash_table[hash].chain;

        spin_lock_bh(&dn_rt_hash_table[hash].lock);
        while ((rth = rcu_dereference_protected(*rthp,
                        lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
                if (compare_keys(&rth->fld, &rt->fld)) {
                        /* Put it first */
                        *rthp = rth->dst.dn_next;
                        rcu_assign_pointer(rth->dst.dn_next,
                                           dn_rt_hash_table[hash].chain);
                        rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

                        dst_use(&rth->dst, now);
                        spin_unlock_bh(&dn_rt_hash_table[hash].lock);

                        dnrt_drop(rt);
                        *rp = rth;
                        return 0;
                }
                rthp = &rth->dst.dn_next;
        }

        rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
        rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

        dst_use(&rt->dst, now);
        spin_unlock_bh(&dn_rt_hash_table[hash].lock);
        *rp = rt;
        return 0;
}
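/*
 * On a hit, the existing entry is moved to the front of its hash chain
 * so that repeated lookups for the same flow find it immediately; the
 * duplicate route the caller just built is dropped and the cached one
 * is handed back through *rp instead.
 */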
static void dn_run_flush(unsigned long dummy)
{
        int i;
        struct dn_route *rt, *next;

        for (i = 0; i < dn_rt_hash_mask; i++) {
                spin_lock_bh(&dn_rt_hash_table[i].lock);

                if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
                        goto nothing_to_declare;

                for(; rt; rt = next) {
                        next = rcu_dereference_raw(rt->dst.dn_next);
                        RCU_INIT_POINTER(rt->dst.dn_next, NULL);
                        dst_free((struct dst_entry *)rt);
                }

nothing_to_declare:
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
        }
}
static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
        unsigned long now = jiffies;
        int user_mode = !in_interrupt();

        if (delay <= 0)
                delay = dn_rt_min_delay;

        spin_lock_bh(&dn_rt_flush_lock);

        if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
                long tmo = (long)(dn_rt_deadline - now);

                if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
                        tmo = 0;

                if (delay > tmo)
                        delay = tmo;
        }

        if (delay <= 0) {
                spin_unlock_bh(&dn_rt_flush_lock);
                dn_run_flush(0);
                return;
        }

        if (dn_rt_deadline == 0)
                dn_rt_deadline = now + dn_rt_max_delay;

        dn_rt_flush_timer.expires = now + delay;
        add_timer(&dn_rt_flush_timer);
        spin_unlock_bh(&dn_rt_flush_lock);
}
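/*
 * The net effect of the logic above is to coalesce flush requests: the
 * first request arms dn_rt_flush_timer and records a deadline of
 * now + dn_rt_max_delay, later requests may shorten the pending timer
 * but never push the flush past that deadline, and a delay that works
 * out as zero (or a deadline already reached) flushes synchronously.
 */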
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
        struct dn_skb_cb *cb;
        unsigned char *ptr;
        __le16 *src;
        __le16 *dst;

        /* Add back headers */
        skb_push(skb, skb->data - skb_network_header(skb));

        if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
                return NET_RX_DROP;

        cb = DN_SKB_CB(skb);
        /* Skip packet length and point to flags */
        ptr = skb->data + 2;
        *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

        dst = (__le16 *)ptr;
        ptr += 2;
        src = (__le16 *)ptr;
        ptr += 2;
        *ptr = 0; /* Zero hop count */

        swap(*src, *dst);

        skb->pkt_type = PACKET_OUTGOING;
        dn_rt_finish_output(skb, NULL, NULL);
        return NET_RX_SUCCESS;
}
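/*
 * Both return paths work the same way: clear the return-request bit
 * (DN_RT_F_RQR), set return-to-sender (DN_RT_F_RTS), zero the hop
 * count, swap the source and destination addresses in the routing
 * header and queue the frame straight back out.  dn_return_long()
 * below does the same dance for the long header format.
 */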
/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
        struct dn_skb_cb *cb;
        unsigned char *ptr;
        unsigned char *src_addr, *dst_addr;
        unsigned char tmp[ETH_ALEN];

        /* Add back all headers */
        skb_push(skb, skb->data - skb_network_header(skb));

        if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
                return NET_RX_DROP;

        cb = DN_SKB_CB(skb);
        /* Ignore packet length and point to flags */
        ptr = skb->data + 2;

        /* Skip padding */
        if (*ptr & DN_RT_F_PF) {
                char padlen = (*ptr & ~DN_RT_F_PF);
                ptr += padlen;
        }

        *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
        ptr += 2;
        dst_addr = ptr;
        ptr += 8;
        src_addr = ptr;
        ptr += 6;
        *ptr = 0; /* Zero hop count */

        /* Swap source and destination */
        memcpy(tmp, src_addr, ETH_ALEN);
        memcpy(src_addr, dst_addr, ETH_ALEN);
        memcpy(dst_addr, tmp, ETH_ALEN);

        skb->pkt_type = PACKET_OUTGOING;
        dn_rt_finish_output(skb, dst_addr, src_addr);
        return NET_RX_SUCCESS;
}
/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dn_skb_cb *cb;
        int err;

        if ((err = dn_route_input(skb)) == 0)
                return dst_input(skb);

        cb = DN_SKB_CB(skb);
        if (decnet_debug_level & 4) {
                char *devname = skb->dev ? skb->dev->name : "???";

                printk(KERN_DEBUG
                        "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
                        (int)cb->rt_flags, devname, skb->len,
                        le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
                        err, skb->pkt_type);
        }

        if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
                switch (cb->rt_flags & DN_RT_PKT_MSK) {
                case DN_RT_PKT_SHORT:
                        return dn_return_short(skb);
                case DN_RT_PKT_LONG:
                        return dn_return_long(skb);
                }
        }

        kfree_skb(skb);
        return NET_RX_DROP;
}
static int dn_route_rx_long(struct sk_buff *skb)
{
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        unsigned char *ptr = skb->data;

        if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
                goto drop_it;

        skb_pull(skb, 20);
        skb_reset_transport_header(skb);

        /* Destination info */
        ptr += 2;
        cb->dst = dn_eth2dn(ptr);
        if (memcmp(ptr, dn_hiord_addr, 4) != 0)
                goto drop_it;
        ptr += 6;

        /* Source info */
        ptr += 2;
        cb->src = dn_eth2dn(ptr);
        if (memcmp(ptr, dn_hiord_addr, 4) != 0)
                goto drop_it;
        ptr += 6;
        /* Other junk */
        ptr++;
        cb->hops = *ptr++; /* Visit Count */

        return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
                       &init_net, NULL, skb, skb->dev, NULL,
                       dn_route_rx_packet);

drop_it:
        kfree_skb(skb);
        return NET_RX_DROP;
}
static int dn_route_rx_short(struct sk_buff *skb)
{
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        unsigned char *ptr = skb->data;

        if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
                goto drop_it;

        skb_pull(skb, 5);
        skb_reset_transport_header(skb);

        cb->dst = *(__le16 *)ptr;
        ptr += 2;
        cb->src = *(__le16 *)ptr;
        ptr += 2;
        cb->hops = *ptr & 0x3f;

        return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
                       &init_net, NULL, skb, skb->dev, NULL,
                       dn_route_rx_packet);

drop_it:
        kfree_skb(skb);
        return NET_RX_DROP;
}
static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        /*
         * I know we drop the packet here, but that's considered success in
         * this case
         */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        dn_neigh_pointopoint_hello(skb);
        return NET_RX_SUCCESS;
}
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
        struct dn_skb_cb *cb;
        unsigned char flags = 0;
        __u16 len = le16_to_cpu(*(__le16 *)skb->data);
        struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
        unsigned char padlen = 0;

        if (!net_eq(dev_net(dev), &init_net))
                goto dump_it;

        if (dn == NULL)
                goto dump_it;

        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
                goto out;

        if (!pskb_may_pull(skb, 3))
                goto dump_it;

        skb_pull(skb, 2);

        if (len > skb->len)
                goto dump_it;

        skb_trim(skb, len);

        flags = *skb->data;

        cb = DN_SKB_CB(skb);
        cb->stamp = jiffies;
        cb->iif = dev->ifindex;

        /*
         * If we have padding, remove it.
         */
        if (flags & DN_RT_F_PF) {
                padlen = flags & ~DN_RT_F_PF;
                if (!pskb_may_pull(skb, padlen + 1))
                        goto dump_it;
                skb_pull(skb, padlen);
                flags = *skb->data;
        }

        skb_reset_network_header(skb);

        /*
         * Weed out future version DECnet
         */
        if (flags & DN_RT_F_VER)
                goto dump_it;

        cb->rt_flags = flags;

        if (decnet_debug_level & 1)
                printk(KERN_DEBUG
                        "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
                        (int)flags, (dev) ? dev->name : "???", len, skb->len,
                        padlen);

        if (flags & DN_RT_PKT_CNTL) {
                if (unlikely(skb_linearize(skb)))
                        goto dump_it;

                switch (flags & DN_RT_CNTL_MSK) {
                case DN_RT_PKT_INIT:
                        dn_dev_init_pkt(skb);
                        break;
                case DN_RT_PKT_VERI:
                        dn_dev_veri_pkt(skb);
                        break;
                }

                if (dn->parms.state != DN_DEV_S_RU)
                        goto dump_it;

                switch (flags & DN_RT_CNTL_MSK) {
                case DN_RT_PKT_HELO:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                                       &init_net, NULL, skb, skb->dev, NULL,
                                       dn_route_ptp_hello);

                case DN_RT_PKT_L1RT:
                case DN_RT_PKT_L2RT:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
                                       &init_net, NULL, skb, skb->dev, NULL,
                                       dn_route_discard);
                case DN_RT_PKT_ERTH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                                       &init_net, NULL, skb, skb->dev, NULL,
                                       dn_neigh_router_hello);

                case DN_RT_PKT_EEDH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                                       &init_net, NULL, skb, skb->dev, NULL,
                                       dn_neigh_endnode_hello);
                }
        } else {
                if (dn->parms.state != DN_DEV_S_RU)
                        goto dump_it;

                skb_pull(skb, 1); /* Pull flags */

                switch (flags & DN_RT_PKT_MSK) {
                case DN_RT_PKT_LONG:
                        return dn_route_rx_long(skb);
                case DN_RT_PKT_SHORT:
                        return dn_route_rx_short(skb);
                }
        }

dump_it:
        kfree_skb(skb);
out:
        return NET_RX_DROP;
}
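/*
 * Summary of the receive path above: after padding removal and the
 * version check, control packets (DN_RT_PKT_CNTL) are fanned out
 * through the NF_DN_HELLO/NF_DN_ROUTE netfilter hooks to the device
 * and neighbour layers, while ordinary data packets are dispatched by
 * header format to dn_route_rx_long() or dn_route_rx_short(), which
 * pass them through NF_DN_PRE_ROUTING before routing proper.
 */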
static int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *) dst;
        struct neighbour *n = rt->n;

        return n->output(n, skb);
}

static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
        struct net_device *dev = dst->dev;
        struct dn_skb_cb *cb = DN_SKB_CB(skb);

        int err = -EINVAL;

        if (rt->n == NULL)
                goto error;

        skb->dev = dev;

        cb->src = rt->rt_saddr;
        cb->dst = rt->rt_daddr;

        /*
         * Always set the Intra-Ethernet bit on all outgoing packets
         * originated on this node. Only valid flag from upper layers
         * is return-to-sender-requested. Set hop count to 0 too.
         */
        cb->rt_flags &= ~DN_RT_F_RQR;
        cb->rt_flags |= DN_RT_F_IE;
        cb->hops = 0;

        return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT,
                       &init_net, sk, skb, NULL, dev,
                       dn_to_neigh_output);

error:
        net_dbg_ratelimited("dn_output: This should not happen\n");

        kfree_skb(skb);

        return err;
}
static int dn_forward(struct sk_buff *skb)
{
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct dst_entry *dst = skb_dst(skb);
        struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
        struct dn_route *rt;
        int header_len;
        struct net_device *dev = skb->dev;

        if (skb->pkt_type != PACKET_HOST)
                goto drop;

        /* Ensure that we have enough space for headers */
        rt = (struct dn_route *)skb_dst(skb);
        header_len = dn_db->use_long ? 21 : 6;
        if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
                goto drop;

        /*
         * Hop count exceeded.
         */
        if (++cb->hops > 30)
                goto drop;

        skb->dev = rt->dst.dev;

        /*
         * If packet goes out same interface it came in on, then set
         * the Intra-Ethernet bit. This has no effect for short
         * packets, so we don't need to test for them here.
         */
        cb->rt_flags &= ~DN_RT_F_IE;
        if (rt->rt_flags & RTCF_DOREDIRECT)
                cb->rt_flags |= DN_RT_F_IE;

        return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD,
                       &init_net, NULL, skb, dev, skb->dev,
                       dn_to_neigh_output);

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}
/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dn_skb_cb *cb = DN_SKB_CB(skb);

        net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
                            le16_to_cpu(cb->src), le16_to_cpu(cb->dst));

        kfree_skb(skb);

        return NET_RX_DROP;
}

static int dn_rt_bug(struct sk_buff *skb)
{
        struct dn_skb_cb *cb = DN_SKB_CB(skb);

        net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
                            le16_to_cpu(cb->src), le16_to_cpu(cb->dst));

        kfree_skb(skb);

        return NET_RX_DROP;
}
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
{
        return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}
static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? : dst->dev->mtu;
}
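/*
 * The "mtu ? : dst->dev->mtu" above uses the GNU a ?: b shorthand:
 * an explicit RTAX_MTU metric wins, otherwise the mtu of the
 * underlying device is reported.
 */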
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
        struct dn_fib_info *fi = res->fi;
        struct net_device *dev = rt->dst.dev;
        unsigned int mss_metric;
        struct neighbour *n;

        if (fi) {
                if (DN_FIB_RES_GW(*res) &&
                    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
                        rt->rt_gateway = DN_FIB_RES_GW(*res);
                dst_init_metrics(&rt->dst, fi->fib_metrics, true);
        }
        rt->rt_type = res->type;

        if (dev != NULL && rt->n == NULL) {
                n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
                rt->n = n;
        }

        if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
                dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
        mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
        if (mss_metric) {
                unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
                if (mss_metric > mss)
                        dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
        }
        return 0;
}
static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
        __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
        int match = 16;
        while (tmp) {
                tmp >>= 1;
                match--;
        }
        return match;
}
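/*
 * dn_match_addr() counts the number of leading bits the two addresses
 * have in common (in host order): identical addresses score 16,
 * completely dissimilar ones 0.  dnet_select_source() below uses the
 * score to prefer the most specific local address for a destination.
 */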
static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr,
                                 int scope)
{
        __le16 saddr = 0;
        struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int best_match = 0;
        int ret;

        rcu_read_lock();
        dn_db = rcu_dereference(dev->dn_ptr);
        for (ifa = rcu_dereference(dn_db->ifa_list);
             ifa != NULL;
             ifa = rcu_dereference(ifa->ifa_next)) {
                if (ifa->ifa_scope > scope)
                        continue;
                if (!daddr) {
                        saddr = ifa->ifa_local;
                        break;
                }
                ret = dn_match_addr(daddr, ifa->ifa_local);
                if (ret > best_match)
                        saddr = ifa->ifa_local;
                if (best_match == 0)
                        saddr = ifa->ifa_local;
        }
        rcu_read_unlock();

        return saddr;
}
static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
        return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
        __le16 mask = dnet_make_mask(res->prefixlen);
        return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
{
        struct flowidn fld = {
                .daddr = oldflp->daddr,
                .saddr = oldflp->saddr,
                .flowidn_scope = RT_SCOPE_UNIVERSE,
                .flowidn_mark = oldflp->flowidn_mark,
                .flowidn_iif = LOOPBACK_IFINDEX,
                .flowidn_oif = oldflp->flowidn_oif,
        };
        struct dn_route *rt = NULL;
        struct net_device *dev_out = NULL, *dev;
        struct neighbour *neigh = NULL;
        unsigned int hash;
        unsigned int flags = 0;
        struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
        int err = -ESRCH;
        int free_res = 0;
        __le16 gateway = 0;

        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
                       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
                       le16_to_cpu(oldflp->saddr),
                       oldflp->flowidn_mark, LOOPBACK_IFINDEX,
                       oldflp->flowidn_oif);

        /* If we have an output interface, verify it's a DECnet device */
        if (oldflp->flowidn_oif) {
                dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
                err = -ENODEV;
                if (dev_out && dev_out->dn_ptr == NULL) {
                        dev_put(dev_out);
                        dev_out = NULL;
                }
                if (dev_out == NULL)
                        goto out;
        }

        /* If we have a source address, verify that it's a local address */
        if (oldflp->saddr) {
                err = -EADDRNOTAVAIL;

                if (dev_out) {
                        if (dn_dev_islocal(dev_out, oldflp->saddr))
                                goto source_ok;
                        dev_put(dev_out);
                        goto out;
                }
                rcu_read_lock();
                for_each_netdev_rcu(&init_net, dev) {
                        if (!dev->dn_ptr)
                                continue;
                        if (!dn_dev_islocal(dev, oldflp->saddr))
                                continue;
                        if ((dev->flags & IFF_LOOPBACK) &&
                            oldflp->daddr &&
                            !dn_dev_islocal(dev, oldflp->daddr))
                                continue;

                        dev_out = dev;
                        break;
                }
                rcu_read_unlock();
                if (dev_out == NULL)
                        goto out;
                dev_hold(dev_out);
source_ok:
                ;
        }

        /* No destination? Assume it's local */
        if (!fld.daddr) {
                fld.daddr = fld.saddr;

                if (dev_out)
                        dev_put(dev_out);
                err = -EINVAL;
                dev_out = init_net.loopback_dev;
                if (!dev_out->dn_ptr)
                        goto out;
                err = -EADDRNOTAVAIL;
                dev_hold(dev_out);
                if (!fld.daddr) {
                        fld.daddr =
                        fld.saddr = dnet_select_source(dev_out, 0,
                                                       RT_SCOPE_HOST);
                        if (!fld.daddr)
                                goto out;
                }
                fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                goto make_route;
        }

        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: initial checks complete."
                       " dst=%04x src=%04x oif=%d try_hard=%d\n",
                       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
                       fld.flowidn_oif, try_hard);

        /*
         * N.B. If the kernel is compiled without router support then
         * dn_fib_lookup() will evaluate to non-zero so this if () block
         * will always be executed.
         */
        err = -ESRCH;
        if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
                struct dn_dev *dn_db;
                if (err != -ESRCH)
                        goto out;
                /*
                 * Here the fallback is basically the standard algorithm for
                 * routing in endnodes which is described in the DECnet routing
                 * docs
                 *
                 * If we are not trying hard, look in neighbour cache.
                 * The result is tested to ensure that if a specific output
                 * device/source address was requested, then we honour that
                 * here
                 */
                if (!try_hard) {
                        neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
                        if (neigh) {
                                if ((oldflp->flowidn_oif &&
                                    (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
                                    (oldflp->saddr &&
                                    (!dn_dev_islocal(neigh->dev,
                                                     oldflp->saddr)))) {
                                        neigh_release(neigh);
                                        neigh = NULL;
                                } else {
                                        if (dev_out)
                                                dev_put(dev_out);
                                        if (dn_dev_islocal(neigh->dev, fld.daddr)) {
                                                dev_out = init_net.loopback_dev;
                                                res.type = RTN_LOCAL;
                                        } else {
                                                dev_out = neigh->dev;
                                        }
                                        dev_hold(dev_out);
                                        goto select_source;
                                }
                        }
                }

                /* Not there? Perhaps it's a local address */
                if (dev_out == NULL)
                        dev_out = dn_dev_get_default();
                err = -ENODEV;
                if (dev_out == NULL)
                        goto out;
                dn_db = rcu_dereference_raw(dev_out->dn_ptr);
                if (!dn_db)
                        goto e_inval;
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fld.daddr)) {
                        dev_put(dev_out);
                        dev_out = init_net.loopback_dev;
                        dev_hold(dev_out);
                        res.type = RTN_LOCAL;
                        goto select_source;
                }
                /* Not local either.... try sending it to the default router */
                neigh = neigh_clone(dn_db->router);
                BUG_ON(neigh && neigh->dev != dev_out);

                /* Ok then, we assume it's directly connected and move on */
select_source:
                if (neigh)
                        gateway = ((struct dn_neigh *)neigh)->addr;
                if (gateway == 0)
                        gateway = fld.daddr;
                if (fld.saddr == 0) {
                        fld.saddr = dnet_select_source(dev_out, gateway,
                                                       res.type == RTN_LOCAL ?
                                                       RT_SCOPE_HOST :
                                                       RT_SCOPE_LINK);
                        if (fld.saddr == 0 && res.type != RTN_LOCAL)
                                goto e_addr;
                }
                fld.flowidn_oif = dev_out->ifindex;
                goto make_route;
        }
        free_res = 1;

        if (res.type == RTN_NAT)
                goto e_inval;

        if (res.type == RTN_LOCAL) {
                if (!fld.saddr)
                        fld.saddr = fld.daddr;
                if (dev_out)
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
                if (!dev_out->dn_ptr)
                        goto e_inval;
                fld.flowidn_oif = dev_out->ifindex;
                if (res.fi)
                        dn_fib_info_put(res.fi);
                res.fi = NULL;
                goto make_route;
        }

        if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
                dn_fib_select_multipath(&fld, &res);

        /*
         * We could add some logic to deal with default routes here and
         * get rid of some of the special casing above.
         */
        if (!fld.saddr)
                fld.saddr = DN_FIB_RES_PREFSRC(res);

        if (dev_out)
                dev_put(dev_out);
        dev_out = DN_FIB_RES_DEV(res);
        dev_hold(dev_out);
        fld.flowidn_oif = dev_out->ifindex;
        gateway = DN_FIB_RES_GW(res);

make_route:
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;

        rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
        if (rt == NULL)
                goto e_nobufs;

        memset(&rt->fld, 0, sizeof(rt->fld));
        rt->fld.saddr        = oldflp->saddr;
        rt->fld.daddr        = oldflp->daddr;
        rt->fld.flowidn_oif  = oldflp->flowidn_oif;
        rt->fld.flowidn_iif  = 0;
        rt->fld.flowidn_mark = oldflp->flowidn_mark;

        rt->rt_saddr      = fld.saddr;
        rt->rt_daddr      = fld.daddr;
        rt->rt_gateway    = gateway ? gateway : fld.daddr;
        rt->rt_local_src  = fld.saddr;

        rt->rt_dst_map    = fld.daddr;
        rt->rt_src_map    = fld.saddr;

        rt->n = neigh;
        neigh = NULL;

        rt->dst.lastuse = jiffies;
        rt->dst.output  = dn_output;
        rt->dst.input   = dn_rt_bug;
        rt->rt_flags      = flags;
        if (flags & RTCF_LOCAL)
                rt->dst.input = dn_nsp_rx;

        err = dn_rt_set_next_hop(rt, &res);
        if (err)
                goto e_neighbour;

        hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
        dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
        if (neigh)
                neigh_release(neigh);
        if (free_res)
                dn_fib_res_put(&res);
        if (dev_out)
                dev_put(dev_out);
out:
        return err;

e_addr:
        err = -EADDRNOTAVAIL;
        goto done;
e_inval:
        err = -EINVAL;
        goto done;
e_nobufs:
        err = -ENOBUFS;
        goto done;
e_neighbour:
        dst_free(&rt->dst);
        goto e_nobufs;
}
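/*
 * dn_route_output_slow() resolves an output route in three stages:
 * first any supplied oif/saddr is validated against local DECnet
 * devices, then the FIB is consulted, and only if that fails (or
 * MSG_TRYHARD is set) does it fall back to the endnode algorithm:
 * try the neighbour cache, then a local address on the default
 * device, then the default router, and finally assume the
 * destination is directly connected.
 */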
/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
{
        unsigned int hash = dn_hash(flp->saddr, flp->daddr);
        struct dn_route *rt = NULL;

        if (!(flags & MSG_TRYHARD)) {
                rcu_read_lock_bh();
                for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
                        rt = rcu_dereference_bh(rt->dst.dn_next)) {
                        if ((flp->daddr == rt->fld.daddr) &&
                            (flp->saddr == rt->fld.saddr) &&
                            (flp->flowidn_mark == rt->fld.flowidn_mark) &&
                            dn_is_output_route(rt) &&
                            (rt->fld.flowidn_oif == flp->flowidn_oif)) {
                                dst_use(&rt->dst, jiffies);
                                rcu_read_unlock_bh();
                                *pprt = &rt->dst;
                                return 0;
                        }
                }
                rcu_read_unlock_bh();
        }

        return dn_route_output_slow(pprt, flp, flags);
}
static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
{
        int err;

        err = __dn_route_output_key(pprt, flp, flags);
        if (err == 0 && flp->flowidn_proto) {
                *pprt = xfrm_lookup(&init_net, *pprt,
                                    flowidn_to_flowi(flp), NULL, 0);
                if (IS_ERR(*pprt)) {
                        err = PTR_ERR(*pprt);
                        *pprt = NULL;
                }
        }
        return err;
}
int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
{
        int err;

        err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
        if (err == 0 && fl->flowidn_proto) {
                *pprt = xfrm_lookup(&init_net, *pprt,
                                    flowidn_to_flowi(fl), sk, 0);
                if (IS_ERR(*pprt)) {
                        err = PTR_ERR(*pprt);
                        *pprt = NULL;
                }
        }
        return err;
}
static int dn_route_input_slow(struct sk_buff *skb)
{
        struct dn_route *rt = NULL;
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct net_device *in_dev = skb->dev;
        struct net_device *out_dev = NULL;
        struct dn_dev *dn_db;
        struct neighbour *neigh = NULL;
        unsigned int hash;
        int flags = 0;
        __le16 gateway = 0;
        __le16 local_src = 0;
        struct flowidn fld = {
                .daddr = cb->dst,
                .saddr = cb->src,
                .flowidn_scope = RT_SCOPE_UNIVERSE,
                .flowidn_mark = skb->mark,
                .flowidn_iif = skb->dev->ifindex,
        };
        struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
        int err = -EINVAL;
        int free_res = 0;

        dev_hold(in_dev);

        if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
                goto out;

        /* Zero source addresses are not allowed */
        if (fld.saddr == 0)
                goto out;

        /*
         * In this case we've just received a packet from a source
         * outside ourselves pretending to come from us. We don't
         * allow it any further to prevent routing loops, spoofing and
         * other nasties. Loopback packets already have the dst attached
         * so this only affects packets which have originated elsewhere.
         */
        err = -ENOTUNIQ;
        if (dn_dev_islocal(in_dev, cb->src))
                goto out;

        err = dn_fib_lookup(&fld, &res);
        if (err) {
                if (err != -ESRCH)
                        goto out;
                /*
                 * Is the destination us ?
                 */
                if (!dn_dev_islocal(in_dev, cb->dst))
                        goto e_inval;

                res.type = RTN_LOCAL;
        } else {
                __le16 src_map = fld.saddr;
                free_res = 1;

                out_dev = DN_FIB_RES_DEV(res);
                if (out_dev == NULL) {
                        net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
                        goto e_inval;
                }
                dev_hold(out_dev);

                if (res.r)
                        src_map = fld.saddr; /* no NAT support for now */

                gateway = DN_FIB_RES_GW(res);
                if (res.type == RTN_NAT) {
                        fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
                        dn_fib_res_put(&res);
                        free_res = 0;
                        if (dn_fib_lookup(&fld, &res))
                                goto e_inval;
                        free_res = 1;
                        if (res.type != RTN_UNICAST)
                                goto e_inval;
                        flags |= RTCF_DNAT;
                        gateway = fld.daddr;
                }
                fld.saddr = src_map;
        }

        switch(res.type) {
        case RTN_UNICAST:
                /*
                 * Forwarding check here, we only check for forwarding
                 * being turned off, if you want to only forward intra
                 * area, it's up to you to set the routing tables up
                 * appropriately.
                 */
                if (dn_db->parms.forwarding == 0)
                        goto e_inval;

                if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
                        dn_fib_select_multipath(&fld, &res);

                /*
                 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
                 * flag as a hint to set the intra-ethernet bit when
                 * forwarding. If we've got NAT in operation, we don't do
                 * this optimisation.
                 */
                if (out_dev == in_dev && !(flags & RTCF_NAT))
                        flags |= RTCF_DOREDIRECT;

                local_src = DN_FIB_RES_PREFSRC(res);

        case RTN_BLACKHOLE:
        case RTN_UNREACHABLE:
                break;
        case RTN_LOCAL:
                flags |= RTCF_LOCAL;
                fld.saddr = cb->dst;
                fld.daddr = cb->src;

                /* Routing tables gave us a gateway */
                if (gateway)
                        goto make_route;

                /* Packet was intra-ethernet, so we know it's on-link */
                if (cb->rt_flags & DN_RT_F_IE) {
                        gateway = cb->src;
                        goto make_route;
                }

                /* Use the default router if there is one */
                neigh = neigh_clone(dn_db->router);
                if (neigh) {
                        gateway = ((struct dn_neigh *)neigh)->addr;
                        goto make_route;
                }

                /* Close eyes and pray */
                gateway = cb->src;
                goto make_route;
        default:
                goto e_inval;
        }

make_route:
        rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST);
        if (rt == NULL)
                goto e_nobufs;

        memset(&rt->fld, 0, sizeof(rt->fld));
        rt->rt_saddr      = fld.saddr;
        rt->rt_daddr      = fld.daddr;
        rt->rt_gateway    = fld.daddr;
        if (gateway)
                rt->rt_gateway = gateway;
        rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;

        rt->rt_dst_map    = fld.daddr;
        rt->rt_src_map    = fld.saddr;

        rt->fld.saddr        = cb->src;
        rt->fld.daddr        = cb->dst;
        rt->fld.flowidn_oif  = 0;
        rt->fld.flowidn_iif  = in_dev->ifindex;
        rt->fld.flowidn_mark = fld.flowidn_mark;

        rt->n = neigh;
        rt->dst.lastuse = jiffies;
        rt->dst.output = dn_rt_bug_out;
        switch (res.type) {
        case RTN_UNICAST:
                rt->dst.input = dn_forward;
                break;
        case RTN_LOCAL:
                rt->dst.output = dn_output;
                rt->dst.input = dn_nsp_rx;
                rt->dst.dev = in_dev;
                flags |= RTCF_LOCAL;
                break;
        default:
        case RTN_UNREACHABLE:
        case RTN_BLACKHOLE:
                rt->dst.input = dst_discard;
        }
        rt->rt_flags = flags;

        err = dn_rt_set_next_hop(rt, &res);
        if (err)
                goto e_neighbour;

        hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
        dn_insert_route(rt, hash, &rt);
        skb_dst_set(skb, &rt->dst);

done:
        if (neigh)
                neigh_release(neigh);
        if (free_res)
                dn_fib_res_put(&res);
        dev_put(in_dev);
        if (out_dev)
                dev_put(out_dev);
out:
        return err;

e_inval:
        err = -EINVAL;
        goto done;

e_nobufs:
        err = -ENOBUFS;
        goto done;

e_neighbour:
        dst_free(&rt->dst);
        goto done;
}
static int dn_route_input(struct sk_buff *skb)
{
        struct dn_route *rt;
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        unsigned int hash = dn_hash(cb->src, cb->dst);

        if (skb_dst(skb))
                return 0;

        rcu_read_lock();
        for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
            rt = rcu_dereference(rt->dst.dn_next)) {
                if ((rt->fld.saddr == cb->src) &&
                    (rt->fld.daddr == cb->dst) &&
                    (rt->fld.flowidn_oif == 0) &&
                    (rt->fld.flowidn_mark == skb->mark) &&
                    (rt->fld.flowidn_iif == cb->iif)) {
                        dst_use(&rt->dst, jiffies);
                        rcu_read_unlock();
                        skb_dst_set(skb, (struct dst_entry *)rt);
                        return NET_RX_SUCCESS;
                }
        }
        rcu_read_unlock();

        return dn_route_input_slow(skb);
}
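/*
 * Input and output routes share the same hash table; they are told
 * apart by their flow keys (an input route carries the receiving
 * interface in flowidn_iif and a zero flowidn_oif), so the compare
 * above only ever matches routes created by dn_route_input_slow().
 */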
static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                           int event, int nowait, unsigned int flags)
{
        struct dn_route *rt = (struct dn_route *)skb_dst(skb);
        struct rtmsg *r;
        struct nlmsghdr *nlh;
        long expires;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        r->rtm_family = AF_DECnet;
        r->rtm_dst_len = 16;
        r->rtm_src_len = 0;
        r->rtm_tos = 0;
        r->rtm_table = RT_TABLE_MAIN;
        r->rtm_type = rt->rt_type;
        r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
        r->rtm_scope = RT_SCOPE_UNIVERSE;
        r->rtm_protocol = RTPROT_UNSPEC;

        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;

        if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
            nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
                goto errout;

        if (rt->fld.saddr) {
                r->rtm_src_len = 16;
                if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
                        goto errout;
        }
        if (rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
                goto errout;

        /*
         * Note to self - change this if input routes reverse direction when
         * they deal only with inputs and not with replies like they do
         * currently.
         */
        if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
                goto errout;

        if (rt->rt_daddr != rt->rt_gateway &&
            nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
                goto errout;

        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto errout;

        expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
                               rt->dst.error) < 0)
                goto errout;

        if (dn_is_input_route(rt) &&
            nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
                goto errout;

        nlmsg_end(skb, nlh);
        return 0;

errout:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = {
        [RTA_DST]               = { .type = NLA_U16 },
        [RTA_SRC]               = { .type = NLA_U16 },
        [RTA_IIF]               = { .type = NLA_U32 },
        [RTA_OIF]               = { .type = NLA_U32 },
        [RTA_GATEWAY]           = { .type = NLA_U16 },
        [RTA_PRIORITY]          = { .type = NLA_U32 },
        [RTA_PREFSRC]           = { .type = NLA_U16 },
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .type = NLA_NESTED },
        [RTA_TABLE]             = { .type = NLA_U32 },
        [RTA_MARK]              = { .type = NLA_U32 },
};

/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(in_skb->sk);
        struct rtmsg *rtm = nlmsg_data(nlh);
        struct dn_route *rt = NULL;
        struct dn_skb_cb *cb;
        int err;
        struct sk_buff *skb;
        struct flowidn fld;
        struct nlattr *tb[RTA_MAX+1];

        if (!net_eq(net, &init_net))
                return -EINVAL;

        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy);
        if (err < 0)
                return err;

        memset(&fld, 0, sizeof(fld));
        fld.flowidn_proto = DNPROTO_NSP;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb == NULL)
                return -ENOBUFS;
        skb_reset_mac_header(skb);
        cb = DN_SKB_CB(skb);

        if (tb[RTA_SRC])
                fld.saddr = nla_get_le16(tb[RTA_SRC]);

        if (tb[RTA_DST])
                fld.daddr = nla_get_le16(tb[RTA_DST]);

        if (tb[RTA_IIF])
                fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);

        if (fld.flowidn_iif) {
                struct net_device *dev;
                dev = __dev_get_by_index(&init_net, fld.flowidn_iif);
                if (!dev || !dev->dn_ptr) {
                        kfree_skb(skb);
                        return -ENODEV;
                }
                skb->protocol = htons(ETH_P_DNA_RT);
                skb->dev = dev;
                cb->src = fld.saddr;
                cb->dst = fld.daddr;
                local_bh_disable();
                err = dn_route_input(skb);
                local_bh_enable();
                memset(cb, 0, sizeof(struct dn_skb_cb));
                rt = (struct dn_route *)skb_dst(skb);
                if (!err && -rt->dst.error)
                        err = rt->dst.error;
        } else {
                if (tb[RTA_OIF])
                        fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);

                err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
        }

        skb->dev = NULL;
        if (err)
                goto out_free;
        skb_dst_set(skb, &rt->dst);
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;

        err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
        if (err < 0) {
                err = -EMSGSIZE;
                goto out_free;
        }

        return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);

out_free:
        kfree_skb(skb);
        return err;
}
/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct dn_route *rt;
        int h, s_h;
        int idx, s_idx;
        struct rtmsg *rtm;

        if (!net_eq(net, &init_net))
                return 0;

        if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
                return -EINVAL;

        rtm = nlmsg_data(cb->nlh);
        if (!(rtm->rtm_flags & RTM_F_CLONED))
                return 0;

        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
        for(h = 0; h <= dn_rt_hash_mask; h++) {
                if (h < s_h)
                        continue;
                if (h > s_h)
                        s_idx = 0;
                rcu_read_lock_bh();
                for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
                        rt;
                        rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
                        if (idx < s_idx)
                                continue;
                        skb_dst_set(skb, dst_clone(&rt->dst));
                        if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWROUTE,
                                        1, NLM_F_MULTI) < 0) {
                                skb_dst_drop(skb);
                                rcu_read_unlock_bh();
                                goto done;
                        }
                        skb_dst_drop(skb);
                }
                rcu_read_unlock_bh();
        }

done:
        cb->args[0] = h;
        cb->args[1] = idx;
        return skb->len;
}
#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
        int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
        struct dn_route *rt = NULL;
        struct dn_rt_cache_iter_state *s = seq->private;

        for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
                rcu_read_lock_bh();
                rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
                if (rt)
                        break;
                rcu_read_unlock_bh();
        }
        return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
        struct dn_rt_cache_iter_state *s = seq->private;

        rt = rcu_dereference_bh(rt->dst.dn_next);
        while (!rt) {
                rcu_read_unlock_bh();
                if (--s->bucket < 0)
                        break;
                rcu_read_lock_bh();
                rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
        }
        return rt;
}
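/*
 * The iterator protocol here is slightly unusual: rcu_read_lock_bh()
 * is taken when a non-empty bucket is found and stays held while the
 * entries of that bucket are walked; it is dropped when the walk runs
 * off the end of a bucket, and dn_rt_cache_seq_stop() below drops it
 * for the final bucket.
 */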
static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct dn_route *rt = dn_rt_cache_get_first(seq);

        if (rt) {
                while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
                        --*pos;
        }
        return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct dn_route *rt = dn_rt_cache_get_next(seq, v);
        ++*pos;
        return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
        if (v)
                rcu_read_unlock_bh();
}
static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
        struct dn_route *rt = v;
        char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

        seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
                   rt->dst.dev ? rt->dst.dev->name : "*",
                   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
                   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
                   atomic_read(&rt->dst.__refcnt),
                   rt->dst.__use, 0);
        return 0;
}
static const struct seq_operations dn_rt_cache_seq_ops = {
        .start  = dn_rt_cache_seq_start,
        .next   = dn_rt_cache_seq_next,
        .stop   = dn_rt_cache_seq_stop,
        .show   = dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &dn_rt_cache_seq_ops,
                        sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dn_rt_cache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};
#endif /* CONFIG_PROC_FS */
void __init dn_route_init(void)
{
        int i, goal, order;

        dn_dst_ops.kmem_cachep =
                kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        dst_entries_init(&dn_dst_ops);
        setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
        dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
        add_timer(&dn_route_timer);

        goal = totalram_pages >> (26 - PAGE_SHIFT);

        for(order = 0; (1UL << order) < goal; order++)
                /* NOTHING */;

        /*
         * Only want 1024 entries max, since the table is very, very unlikely
         * to be larger than that.
         */
        while(order && ((((1UL << order) * PAGE_SIZE) /
                                sizeof(struct dn_rt_hash_bucket)) >= 2048))
                order--;

        do {
                dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
                        sizeof(struct dn_rt_hash_bucket);
                while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
                        dn_rt_hash_mask--;
                dn_rt_hash_table = (struct dn_rt_hash_bucket *)
                        __get_free_pages(GFP_ATOMIC, order);
        } while (dn_rt_hash_table == NULL && --order > 0);

        if (!dn_rt_hash_table)
                panic("Failed to allocate DECnet route cache hash table\n");

        printk(KERN_INFO
                "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
                dn_rt_hash_mask,
                (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

        dn_rt_hash_mask--;
        for(i = 0; i <= dn_rt_hash_mask; i++) {
                spin_lock_init(&dn_rt_hash_table[i].lock);
                dn_rt_hash_table[i].chain = NULL;
        }

        dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

        proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
                    &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
        rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
                      dn_fib_dump, NULL);
#else
        rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
                      dn_cache_dump, NULL);
#endif
}
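/*
 * A worked sizing example (assuming 4K pages and a 16 byte
 * dn_rt_hash_bucket on 64 bit): 512MB of RAM gives goal = 8 pages,
 * so order starts at 3 (8 pages = 2048 buckets); the >= 2048 cap
 * then drops order to 2, i.e. 1024 buckets, and after rounding down
 * to a power of two dn_rt_hash_mask ends up as 1023.
 */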
void __exit dn_route_cleanup(void)
{
        del_timer(&dn_route_timer);
        dn_run_flush(0);

        remove_proc_entry("decnet_cache", init_net.proc_net);
        dst_entries_destroy(&dn_dst_ops);
}