net/decnet/dn_route.c
1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * DECnet Routing Functions (Endnode and Router)
8 * Authors: Steve Whitehouse <SteveW@ACM.org>
9 * Eduardo Marcelo Serrat <emserrat@geocities.com>
11 * Changes:
12 * Steve Whitehouse : Fixes to allow "intra-ethernet" and
13 * "return-to-sender" bits on outgoing
14 * packets.
15 * Steve Whitehouse : Timeouts for cached routes.
16 * Steve Whitehouse : Use dst cache for input routes too.
17 * Steve Whitehouse : Fixed error values in dn_send_skb.
18 * Steve Whitehouse : Rework routing functions to better fit
19 * DECnet routing design
20 * Alexey Kuznetsov : New SMP locking
21 * Steve Whitehouse : More SMP locking changes & dn_cache_dump()
22 * Steve Whitehouse : Prerouting NF hook, now really is prerouting.
23 * Fixed possible skb leak in rtnetlink funcs.
24 * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
25 * Alexey Kuznetsov's finer grained locking
26 * from ipv4/route.c.
27 * Steve Whitehouse : Routing is now starting to look like a
28 * sensible set of code now, mainly due to
29 * my copying the IPv4 routing code. The
30 * hooks here are modified and will continue
31 * to evolve for a while.
32 * Steve Whitehouse : Real SMP at last :-) Also new netfilter
33 * stuff. Look out raw sockets your days
34 * are numbered!
35 * Steve Whitehouse : Added return-to-sender functions. Added
36 * backlog congestion level return codes.
37 * Steve Whitehouse : Fixed bug where routes were set up with
38 * no ref count on net devices.
39 * Steve Whitehouse : RCU for the route cache
40 * Steve Whitehouse : Preparations for the flow cache
41 * Steve Whitehouse : Prepare for nonlinear skbs
44 /******************************************************************************
45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
47 This program is free software; you can redistribute it and/or modify
48 it under the terms of the GNU General Public License as published by
49 the Free Software Foundation; either version 2 of the License, or
50 any later version.
52 This program is distributed in the hope that it will be useful,
53 but WITHOUT ANY WARRANTY; without even the implied warranty of
54 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55 GNU General Public License for more details.
56 *******************************************************************************/
58 #include <linux/errno.h>
59 #include <linux/types.h>
60 #include <linux/socket.h>
61 #include <linux/in.h>
62 #include <linux/kernel.h>
63 #include <linux/sockios.h>
64 #include <linux/net.h>
65 #include <linux/netdevice.h>
66 #include <linux/inet.h>
67 #include <linux/route.h>
68 #include <linux/in_route.h>
69 #include <linux/slab.h>
70 #include <net/sock.h>
71 #include <linux/mm.h>
72 #include <linux/proc_fs.h>
73 #include <linux/seq_file.h>
74 #include <linux/init.h>
75 #include <linux/rtnetlink.h>
76 #include <linux/string.h>
77 #include <linux/netfilter_decnet.h>
78 #include <linux/rcupdate.h>
79 #include <linux/times.h>
80 #include <linux/export.h>
81 #include <asm/errno.h>
82 #include <net/net_namespace.h>
83 #include <net/netlink.h>
84 #include <net/neighbour.h>
85 #include <net/dst.h>
86 #include <net/flow.h>
87 #include <net/fib_rules.h>
88 #include <net/dn.h>
89 #include <net/dn_dev.h>
90 #include <net/dn_nsp.h>
91 #include <net/dn_route.h>
92 #include <net/dn_neigh.h>
93 #include <net/dn_fib.h>
95 struct dn_rt_hash_bucket
97 struct dn_route __rcu *chain;
98 spinlock_t lock;
101 extern struct neigh_table dn_neigh_table;
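/*
 * DECnet Phase IV "HIORD" prefix: station addresses take the form
 * AA-00-04-00-xx-yy, where the last two octets carry the 16 bit node
 * address. The first four bytes below are compared against the
 * addresses found in incoming long format headers.
 */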
104 static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
106 static const int dn_rt_min_delay = 2 * HZ;
107 static const int dn_rt_max_delay = 10 * HZ;
108 static const int dn_rt_mtu_expires = 10 * 60 * HZ;
110 static unsigned long dn_rt_deadline;
112 static int dn_dst_gc(struct dst_ops *ops);
113 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
114 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
116 static void dn_dst_destroy(struct dst_entry *);
117 static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
118 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
119 static void dn_dst_link_failure(struct sk_buff *);
120 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
121 struct sk_buff *skb , u32 mtu);
122 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
123 struct sk_buff *skb);
124 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
125 struct sk_buff *skb,
126 const void *daddr);
127 static int dn_route_input(struct sk_buff *);
128 static void dn_run_flush(struct timer_list *unused);
130 static struct dn_rt_hash_bucket *dn_rt_hash_table;
131 static unsigned int dn_rt_hash_mask;
133 static struct timer_list dn_route_timer;
134 static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush);
135 int decnet_dst_gc_interval = 2;
137 static struct dst_ops dn_dst_ops = {
138 .family = PF_DECnet,
139 .gc_thresh = 128,
140 .gc = dn_dst_gc,
141 .check = dn_dst_check,
142 .default_advmss = dn_dst_default_advmss,
143 .mtu = dn_dst_mtu,
144 .cow_metrics = dst_cow_metrics_generic,
145 .destroy = dn_dst_destroy,
146 .ifdown = dn_dst_ifdown,
147 .negative_advice = dn_dst_negative_advice,
148 .link_failure = dn_dst_link_failure,
149 .update_pmtu = dn_dst_update_pmtu,
150 .redirect = dn_dst_redirect,
151 .neigh_lookup = dn_dst_neigh_lookup,
154 static void dn_dst_destroy(struct dst_entry *dst)
156 struct dn_route *rt = (struct dn_route *) dst;
158 if (rt->n)
159 neigh_release(rt->n);
160 dst_destroy_metrics_generic(dst);
163 static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
165 if (how) {
166 struct dn_route *rt = (struct dn_route *) dst;
167 struct neighbour *n = rt->n;
169 if (n && n->dev == dev) {
170 n->dev = dev_net(dev)->loopback_dev;
171 dev_hold(n->dev);
172 dev_put(dev);
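/*
 * Hash a (source, destination) address pair into a route cache bucket
 * by XORing the two addresses, folding the result down and masking
 * with the table size.
 */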
177 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
179 __u16 tmp = (__u16 __force)(src ^ dst);
180 tmp ^= (tmp >> 3);
181 tmp ^= (tmp >> 5);
182 tmp ^= (tmp >> 10);
183 return dn_rt_hash_mask & (unsigned int)tmp;
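/*
 * Periodic garbage collection: walk each hash chain and drop cached
 * routes whose only remaining reference is the cache itself and which
 * have not been used for two minutes, then re-arm the timer. The scan
 * bails out early once it has used up a whole jiffy.
 */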
186 static void dn_dst_check_expire(struct timer_list *unused)
188 int i;
189 struct dn_route *rt;
190 struct dn_route __rcu **rtp;
191 unsigned long now = jiffies;
192 unsigned long expire = 120 * HZ;
194 for (i = 0; i <= dn_rt_hash_mask; i++) {
195 rtp = &dn_rt_hash_table[i].chain;
197 spin_lock(&dn_rt_hash_table[i].lock);
198 while ((rt = rcu_dereference_protected(*rtp,
199 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
200 if (atomic_read(&rt->dst.__refcnt) > 1 ||
201 (now - rt->dst.lastuse) < expire) {
202 rtp = &rt->dn_next;
203 continue;
205 *rtp = rt->dn_next;
206 rt->dn_next = NULL;
207 dst_dev_put(&rt->dst);
208 dst_release(&rt->dst);
210 spin_unlock(&dn_rt_hash_table[i].lock);
212 if ((jiffies - now) > 0)
213 break;
216 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
219 static int dn_dst_gc(struct dst_ops *ops)
221 struct dn_route *rt;
222 struct dn_route __rcu **rtp;
223 int i;
224 unsigned long now = jiffies;
225 unsigned long expire = 10 * HZ;
227 for (i = 0; i <= dn_rt_hash_mask; i++) {
229 spin_lock_bh(&dn_rt_hash_table[i].lock);
230 rtp = &dn_rt_hash_table[i].chain;
232 while ((rt = rcu_dereference_protected(*rtp,
233 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
234 if (atomic_read(&rt->dst.__refcnt) > 1 ||
235 (now - rt->dst.lastuse) < expire) {
236 rtp = &rt->dn_next;
237 continue;
239 *rtp = rt->dn_next;
240 rt->dn_next = NULL;
241 dst_dev_put(&rt->dst);
242 dst_release(&rt->dst);
243 break;
245 spin_unlock_bh(&dn_rt_hash_table[i].lock);
248 return 0;
252  * The DECnet standards don't impose a particular minimum mtu; what they
253 * do insist on is that the routing layer accepts a datagram of at least
254 * 230 bytes long. Here we have to subtract the routing header length from
255 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
256 * assume the worst and use a long header size.
258 * We update both the mtu and the advertised mss (i.e. the segment size we
259 * advertise to the other end).
261 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
262 struct sk_buff *skb, u32 mtu)
264 struct dn_route *rt = (struct dn_route *) dst;
265 struct neighbour *n = rt->n;
266 u32 min_mtu = 230;
267 struct dn_dev *dn;
269 dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
271 if (dn && dn->use_long == 0)
272 min_mtu -= 6;
273 else
274 min_mtu -= 21;
276 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
277 if (!(dst_metric_locked(dst, RTAX_MTU))) {
278 dst_metric_set(dst, RTAX_MTU, mtu);
279 dst_set_expires(dst, dn_rt_mtu_expires);
281 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
282 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
283 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
284 if (!existing_mss || existing_mss > mss)
285 dst_metric_set(dst, RTAX_ADVMSS, mss);
290 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
291 struct sk_buff *skb)
296  * Called when a route has been marked obsolete (e.g. by a routing cache flush).
298 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
300 return NULL;
303 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
305 dst_release(dst);
306 return NULL;
309 static void dn_dst_link_failure(struct sk_buff *skb)
313 static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
315 return ((fl1->daddr ^ fl2->daddr) |
316 (fl1->saddr ^ fl2->saddr) |
317 (fl1->flowidn_mark ^ fl2->flowidn_mark) |
318 (fl1->flowidn_scope ^ fl2->flowidn_scope) |
319 (fl1->flowidn_oif ^ fl2->flowidn_oif) |
320 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
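/*
 * Insert a route into the cache. If an entry with the same flow key is
 * already present it is moved to the head of its chain and returned in
 * *rp instead, and the new route is released.
 */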
323 static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
325 struct dn_route *rth;
326 struct dn_route __rcu **rthp;
327 unsigned long now = jiffies;
329 rthp = &dn_rt_hash_table[hash].chain;
331 spin_lock_bh(&dn_rt_hash_table[hash].lock);
332 while ((rth = rcu_dereference_protected(*rthp,
333 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
334 if (compare_keys(&rth->fld, &rt->fld)) {
335 /* Put it first */
336 *rthp = rth->dn_next;
337 rcu_assign_pointer(rth->dn_next,
338 dn_rt_hash_table[hash].chain);
339 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
341 dst_hold_and_use(&rth->dst, now);
342 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
344 dst_release_immediate(&rt->dst);
345 *rp = rth;
346 return 0;
348 rthp = &rth->dn_next;
351 rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
352 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
354 dst_hold_and_use(&rt->dst, now);
355 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
356 *rp = rt;
357 return 0;
360 static void dn_run_flush(struct timer_list *unused)
362 int i;
363 struct dn_route *rt, *next;
365 for (i = 0; i < dn_rt_hash_mask; i++) {
366 spin_lock_bh(&dn_rt_hash_table[i].lock);
368 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
369 goto nothing_to_declare;
371 for(; rt; rt = next) {
372 next = rcu_dereference_raw(rt->dn_next);
373 RCU_INIT_POINTER(rt->dn_next, NULL);
374 dst_dev_put(&rt->dst);
375 dst_release(&rt->dst);
378 nothing_to_declare:
379 spin_unlock_bh(&dn_rt_hash_table[i].lock);
383 static DEFINE_SPINLOCK(dn_rt_flush_lock);
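/*
 * Schedule a flush of the route cache. A negative delay means "use the
 * default minimum delay"; once the computed delay reaches zero the
 * flush runs immediately, otherwise the flush timer is (re)armed,
 * bounded by dn_rt_deadline so that repeated requests cannot postpone
 * the flush forever.
 */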
385 void dn_rt_cache_flush(int delay)
387 unsigned long now = jiffies;
388 int user_mode = !in_interrupt();
390 if (delay < 0)
391 delay = dn_rt_min_delay;
393 spin_lock_bh(&dn_rt_flush_lock);
395 if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
396 long tmo = (long)(dn_rt_deadline - now);
398 if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
399 tmo = 0;
401 if (delay > tmo)
402 delay = tmo;
405 if (delay <= 0) {
406 spin_unlock_bh(&dn_rt_flush_lock);
407 dn_run_flush(0);
408 return;
411 if (dn_rt_deadline == 0)
412 dn_rt_deadline = now + dn_rt_max_delay;
414 dn_rt_flush_timer.expires = now + delay;
415 add_timer(&dn_rt_flush_timer);
416 spin_unlock_bh(&dn_rt_flush_lock);
420 * dn_return_short - Return a short packet to its sender
421 * @skb: The packet to return
424 static int dn_return_short(struct sk_buff *skb)
426 struct dn_skb_cb *cb;
427 unsigned char *ptr;
428 __le16 *src;
429 __le16 *dst;
431 /* Add back headers */
432 skb_push(skb, skb->data - skb_network_header(skb));
434 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
435 return NET_RX_DROP;
437 cb = DN_SKB_CB(skb);
438 /* Skip packet length and point to flags */
439 ptr = skb->data + 2;
440 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
442 dst = (__le16 *)ptr;
443 ptr += 2;
444 src = (__le16 *)ptr;
445 ptr += 2;
446 *ptr = 0; /* Zero hop count */
448 swap(*src, *dst);
450 skb->pkt_type = PACKET_OUTGOING;
451 dn_rt_finish_output(skb, NULL, NULL);
452 return NET_RX_SUCCESS;
456 * dn_return_long - Return a long packet to its sender
457 * @skb: The long format packet to return
460 static int dn_return_long(struct sk_buff *skb)
462 struct dn_skb_cb *cb;
463 unsigned char *ptr;
464 unsigned char *src_addr, *dst_addr;
465 unsigned char tmp[ETH_ALEN];
467 /* Add back all headers */
468 skb_push(skb, skb->data - skb_network_header(skb));
470 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
471 return NET_RX_DROP;
473 cb = DN_SKB_CB(skb);
474 /* Ignore packet length and point to flags */
475 ptr = skb->data + 2;
477 /* Skip padding */
478 if (*ptr & DN_RT_F_PF) {
479 char padlen = (*ptr & ~DN_RT_F_PF);
480 ptr += padlen;
483 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
484 ptr += 2;
485 dst_addr = ptr;
486 ptr += 8;
487 src_addr = ptr;
488 ptr += 6;
489 *ptr = 0; /* Zero hop count */
491 /* Swap source and destination */
492 memcpy(tmp, src_addr, ETH_ALEN);
493 memcpy(src_addr, dst_addr, ETH_ALEN);
494 memcpy(dst_addr, tmp, ETH_ALEN);
496 skb->pkt_type = PACKET_OUTGOING;
497 dn_rt_finish_output(skb, dst_addr, src_addr);
498 return NET_RX_SUCCESS;
502 * dn_route_rx_packet - Try and find a route for an incoming packet
503 * @skb: The packet to find a route for
505 * Returns: result of input function if route is found, error code otherwise
507 static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
509 struct dn_skb_cb *cb;
510 int err;
512 if ((err = dn_route_input(skb)) == 0)
513 return dst_input(skb);
515 cb = DN_SKB_CB(skb);
516 if (decnet_debug_level & 4) {
517 char *devname = skb->dev ? skb->dev->name : "???";
519 printk(KERN_DEBUG
520 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
521 (int)cb->rt_flags, devname, skb->len,
522 le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
523 err, skb->pkt_type);
526 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
527 switch (cb->rt_flags & DN_RT_PKT_MSK) {
528 case DN_RT_PKT_SHORT:
529 return dn_return_short(skb);
530 case DN_RT_PKT_LONG:
531 return dn_return_long(skb);
535 kfree_skb(skb);
536 return NET_RX_DROP;
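/*
 * Parse a long format (ethernet) routing header: verify the HIORD
 * prefix on both addresses, extract the 16 bit source and destination
 * plus the visit count, then hand the packet to the PRE_ROUTING
 * netfilter hook.
 */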
539 static int dn_route_rx_long(struct sk_buff *skb)
541 struct dn_skb_cb *cb = DN_SKB_CB(skb);
542 unsigned char *ptr = skb->data;
544 if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
545 goto drop_it;
547 skb_pull(skb, 20);
548 skb_reset_transport_header(skb);
550 /* Destination info */
551 ptr += 2;
552 cb->dst = dn_eth2dn(ptr);
553 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
554 goto drop_it;
555 ptr += 6;
558 /* Source info */
559 ptr += 2;
560 cb->src = dn_eth2dn(ptr);
561 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
562 goto drop_it;
563 ptr += 6;
564 /* Other junk */
565 ptr++;
566 cb->hops = *ptr++; /* Visit Count */
568 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
569 &init_net, NULL, skb, skb->dev, NULL,
570 dn_route_rx_packet);
572 drop_it:
573 kfree_skb(skb);
574 return NET_RX_DROP;
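/*
 * Parse a short format routing header: two octet destination, two
 * octet source and a forwarding byte carrying the hop count, then hand
 * the packet to the PRE_ROUTING netfilter hook.
 */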
579 static int dn_route_rx_short(struct sk_buff *skb)
581 struct dn_skb_cb *cb = DN_SKB_CB(skb);
582 unsigned char *ptr = skb->data;
584 if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
585 goto drop_it;
587 skb_pull(skb, 5);
588 skb_reset_transport_header(skb);
590 cb->dst = *(__le16 *)ptr;
591 ptr += 2;
592 cb->src = *(__le16 *)ptr;
593 ptr += 2;
594 cb->hops = *ptr & 0x3f;
596 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
597 &init_net, NULL, skb, skb->dev, NULL,
598 dn_route_rx_packet);
600 drop_it:
601 kfree_skb(skb);
602 return NET_RX_DROP;
605 static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
608          * I know we drop the packet here, but that's considered success in
609 * this case
611 kfree_skb(skb);
612 return NET_RX_SUCCESS;
615 static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
617 dn_dev_hello(skb);
618 dn_neigh_pointopoint_hello(skb);
619 return NET_RX_SUCCESS;
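/*
 * Main receive entry point for ETH_P_DNA_RT frames. Validates the two
 * octet length field, strips any padding, drops future version packets
 * and then dispatches: control packets (hellos, routing messages) go
 * to the appropriate netfilter hooks, data packets are handed to the
 * long or short format parsers above.
 */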
622 int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
624 struct dn_skb_cb *cb;
625 unsigned char flags = 0;
626 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
627 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
628 unsigned char padlen = 0;
630 if (!net_eq(dev_net(dev), &init_net))
631 goto dump_it;
633 if (dn == NULL)
634 goto dump_it;
636 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
637 goto out;
639 if (!pskb_may_pull(skb, 3))
640 goto dump_it;
642 skb_pull(skb, 2);
644 if (len > skb->len)
645 goto dump_it;
647 skb_trim(skb, len);
649 flags = *skb->data;
651 cb = DN_SKB_CB(skb);
652 cb->stamp = jiffies;
653 cb->iif = dev->ifindex;
656 * If we have padding, remove it.
658 if (flags & DN_RT_F_PF) {
659 padlen = flags & ~DN_RT_F_PF;
660 if (!pskb_may_pull(skb, padlen + 1))
661 goto dump_it;
662 skb_pull(skb, padlen);
663 flags = *skb->data;
666 skb_reset_network_header(skb);
669          * Weed out future version DECnet packets
671 if (flags & DN_RT_F_VER)
672 goto dump_it;
674 cb->rt_flags = flags;
676 if (decnet_debug_level & 1)
677 printk(KERN_DEBUG
678 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
679 (int)flags, (dev) ? dev->name : "???", len, skb->len,
680 padlen);
682 if (flags & DN_RT_PKT_CNTL) {
683 if (unlikely(skb_linearize(skb)))
684 goto dump_it;
686 switch (flags & DN_RT_CNTL_MSK) {
687 case DN_RT_PKT_INIT:
688 dn_dev_init_pkt(skb);
689 break;
690 case DN_RT_PKT_VERI:
691 dn_dev_veri_pkt(skb);
692 break;
695 if (dn->parms.state != DN_DEV_S_RU)
696 goto dump_it;
698 switch (flags & DN_RT_CNTL_MSK) {
699 case DN_RT_PKT_HELO:
700 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
701 &init_net, NULL, skb, skb->dev, NULL,
702 dn_route_ptp_hello);
704 case DN_RT_PKT_L1RT:
705 case DN_RT_PKT_L2RT:
706 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
707 &init_net, NULL, skb, skb->dev, NULL,
708 dn_route_discard);
709 case DN_RT_PKT_ERTH:
710 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
711 &init_net, NULL, skb, skb->dev, NULL,
712 dn_neigh_router_hello);
714 case DN_RT_PKT_EEDH:
715 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
716 &init_net, NULL, skb, skb->dev, NULL,
717 dn_neigh_endnode_hello);
719 } else {
720 if (dn->parms.state != DN_DEV_S_RU)
721 goto dump_it;
723 skb_pull(skb, 1); /* Pull flags */
725 switch (flags & DN_RT_PKT_MSK) {
726 case DN_RT_PKT_LONG:
727 return dn_route_rx_long(skb);
728 case DN_RT_PKT_SHORT:
729 return dn_route_rx_short(skb);
733 dump_it:
734 kfree_skb(skb);
735 out:
736 return NET_RX_DROP;
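/*
 * Output path for locally originated packets. The source, destination,
 * intra-ethernet flag and a zero hop count are stamped into the skb
 * control block before the packet is passed through the LOCAL_OUT
 * netfilter hook towards the neighbour output function.
 */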
739 static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
741 struct dst_entry *dst = skb_dst(skb);
742 struct dn_route *rt = (struct dn_route *)dst;
743 struct net_device *dev = dst->dev;
744 struct dn_skb_cb *cb = DN_SKB_CB(skb);
746 int err = -EINVAL;
748 if (rt->n == NULL)
749 goto error;
751 skb->dev = dev;
753 cb->src = rt->rt_saddr;
754 cb->dst = rt->rt_daddr;
757 * Always set the Intra-Ethernet bit on all outgoing packets
758          * originated on this node. The only valid flag from upper layers
759 * is return-to-sender-requested. Set hop count to 0 too.
761 cb->rt_flags &= ~DN_RT_F_RQR;
762 cb->rt_flags |= DN_RT_F_IE;
763 cb->hops = 0;
765 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT,
766 &init_net, sk, skb, NULL, dev,
767 dn_to_neigh_output);
769 error:
770 net_dbg_ratelimited("dn_output: This should not happen\n");
772 kfree_skb(skb);
774 return err;
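/*
 * Forward a packet received from the wire: make room for the outgoing
 * routing header, enforce the 30 hop limit, set the intra-ethernet bit
 * when the packet leaves on the interface it arrived on, and run the
 * FORWARD netfilter hook.
 */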
777 static int dn_forward(struct sk_buff *skb)
779 struct dn_skb_cb *cb = DN_SKB_CB(skb);
780 struct dst_entry *dst = skb_dst(skb);
781 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
782 struct dn_route *rt;
783 int header_len;
784 struct net_device *dev = skb->dev;
786 if (skb->pkt_type != PACKET_HOST)
787 goto drop;
789 /* Ensure that we have enough space for headers */
790 rt = (struct dn_route *)skb_dst(skb);
791 header_len = dn_db->use_long ? 21 : 6;
792 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
793 goto drop;
796 * Hop count exceeded.
798 if (++cb->hops > 30)
799 goto drop;
801 skb->dev = rt->dst.dev;
804          * If the packet goes out on the same interface it came in on, then set
805 * the Intra-Ethernet bit. This has no effect for short
806 * packets, so we don't need to test for them here.
808 cb->rt_flags &= ~DN_RT_F_IE;
809 if (rt->rt_flags & RTCF_DOREDIRECT)
810 cb->rt_flags |= DN_RT_F_IE;
812 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD,
813 &init_net, NULL, skb, dev, skb->dev,
814 dn_to_neigh_output);
816 drop:
817 kfree_skb(skb);
818 return NET_RX_DROP;
822 * Used to catch bugs. This should never normally get
823 * called.
825 static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
827 struct dn_skb_cb *cb = DN_SKB_CB(skb);
829 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
830 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
832 kfree_skb(skb);
834 return NET_RX_DROP;
837 static int dn_rt_bug(struct sk_buff *skb)
839 struct dn_skb_cb *cb = DN_SKB_CB(skb);
841 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
842 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
844 kfree_skb(skb);
846 return NET_RX_DROP;
849 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
851 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
854 static unsigned int dn_dst_mtu(const struct dst_entry *dst)
856 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
858 return mtu ? : dst->dev->mtu;
861 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
862 struct sk_buff *skb,
863 const void *daddr)
865 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
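/*
 * Attach the gateway, metrics and neighbour entry to a freshly built
 * route, clamping the cached MTU and advertised MSS to what the output
 * device can actually carry.
 */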
868 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
870 struct dn_fib_info *fi = res->fi;
871 struct net_device *dev = rt->dst.dev;
872 unsigned int mss_metric;
873 struct neighbour *n;
875 if (fi) {
876 if (DN_FIB_RES_GW(*res) &&
877 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
878 rt->rt_gateway = DN_FIB_RES_GW(*res);
879 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
881 rt->rt_type = res->type;
883 if (dev != NULL && rt->n == NULL) {
884 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
885 if (IS_ERR(n))
886 return PTR_ERR(n);
887 rt->n = n;
890 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
891 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
892 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
893 if (mss_metric) {
894 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
895 if (mss_metric > mss)
896 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
898 return 0;
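/*
 * Return the number of leading bits two DECnet addresses have in
 * common; used by dnet_select_source() to pick the closest matching
 * local address for a given destination.
 */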
901 static inline int dn_match_addr(__le16 addr1, __le16 addr2)
903 __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
904 int match = 16;
905 while(tmp) {
906 tmp >>= 1;
907 match--;
909 return match;
912 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
914 __le16 saddr = 0;
915 struct dn_dev *dn_db;
916 struct dn_ifaddr *ifa;
917 int best_match = 0;
918 int ret;
920 rcu_read_lock();
921 dn_db = rcu_dereference(dev->dn_ptr);
922 for (ifa = rcu_dereference(dn_db->ifa_list);
923 ifa != NULL;
924 ifa = rcu_dereference(ifa->ifa_next)) {
925 if (ifa->ifa_scope > scope)
926 continue;
927 if (!daddr) {
928 saddr = ifa->ifa_local;
929 break;
931 ret = dn_match_addr(daddr, ifa->ifa_local);
932 if (ret > best_match)
933 saddr = ifa->ifa_local;
934 if (best_match == 0)
935 saddr = ifa->ifa_local;
937 rcu_read_unlock();
939 return saddr;
942 static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
944 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
947 static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
949 __le16 mask = dnet_make_mask(res->prefixlen);
950 return (daddr&~mask)|res->fi->fib_nh->nh_gw;
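/*
 * Slow path for output routes. Any requested output interface and
 * source address are validated first; a missing destination is treated
 * as local. The FIB is then consulted and, failing that, the endnode
 * fallback applies: use the neighbour cache (unless "try hard" was
 * requested), deliver locally, go via the default router, or assume
 * the destination is directly connected. The resulting route is
 * cached with dn_insert_route().
 */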
953 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
955 struct flowidn fld = {
956 .daddr = oldflp->daddr,
957 .saddr = oldflp->saddr,
958 .flowidn_scope = RT_SCOPE_UNIVERSE,
959 .flowidn_mark = oldflp->flowidn_mark,
960 .flowidn_iif = LOOPBACK_IFINDEX,
961 .flowidn_oif = oldflp->flowidn_oif,
963 struct dn_route *rt = NULL;
964 struct net_device *dev_out = NULL, *dev;
965 struct neighbour *neigh = NULL;
966 unsigned int hash;
967 unsigned int flags = 0;
968 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
969 int err;
970 int free_res = 0;
971 __le16 gateway = 0;
973 if (decnet_debug_level & 16)
974 printk(KERN_DEBUG
975 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
976 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
977 le16_to_cpu(oldflp->saddr),
978 oldflp->flowidn_mark, LOOPBACK_IFINDEX,
979 oldflp->flowidn_oif);
981         /* If we have an output interface, verify it's a DECnet device */
982 if (oldflp->flowidn_oif) {
983 dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
984 err = -ENODEV;
985 if (dev_out && dev_out->dn_ptr == NULL) {
986 dev_put(dev_out);
987 dev_out = NULL;
989 if (dev_out == NULL)
990 goto out;
993         /* If we have a source address, verify that it's a local address */
994 if (oldflp->saddr) {
995 err = -EADDRNOTAVAIL;
997 if (dev_out) {
998 if (dn_dev_islocal(dev_out, oldflp->saddr))
999 goto source_ok;
1000 dev_put(dev_out);
1001 goto out;
1003 rcu_read_lock();
1004 for_each_netdev_rcu(&init_net, dev) {
1005 if (!dev->dn_ptr)
1006 continue;
1007 if (!dn_dev_islocal(dev, oldflp->saddr))
1008 continue;
1009 if ((dev->flags & IFF_LOOPBACK) &&
1010 oldflp->daddr &&
1011 !dn_dev_islocal(dev, oldflp->daddr))
1012 continue;
1014 dev_out = dev;
1015 break;
1017 rcu_read_unlock();
1018 if (dev_out == NULL)
1019 goto out;
1020 dev_hold(dev_out);
1021 source_ok:
1025         /* No destination? Assume it's local */
1026 if (!fld.daddr) {
1027 fld.daddr = fld.saddr;
1029 if (dev_out)
1030 dev_put(dev_out);
1031 err = -EINVAL;
1032 dev_out = init_net.loopback_dev;
1033 if (!dev_out->dn_ptr)
1034 goto out;
1035 err = -EADDRNOTAVAIL;
1036 dev_hold(dev_out);
1037 if (!fld.daddr) {
1038 fld.daddr =
1039 fld.saddr = dnet_select_source(dev_out, 0,
1040 RT_SCOPE_HOST);
1041 if (!fld.daddr)
1042 goto out;
1044 fld.flowidn_oif = LOOPBACK_IFINDEX;
1045 res.type = RTN_LOCAL;
1046 goto make_route;
1049 if (decnet_debug_level & 16)
1050 printk(KERN_DEBUG
1051 "dn_route_output_slow: initial checks complete."
1052 " dst=%04x src=%04x oif=%d try_hard=%d\n",
1053 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
1054 fld.flowidn_oif, try_hard);
1057 * N.B. If the kernel is compiled without router support then
1058 * dn_fib_lookup() will evaluate to non-zero so this if () block
1059 * will always be executed.
1061 err = -ESRCH;
1062 if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
1063 struct dn_dev *dn_db;
1064 if (err != -ESRCH)
1065 goto out;
1067 * Here the fallback is basically the standard algorithm for
1068 * routing in endnodes which is described in the DECnet routing
1069 * docs
1071 * If we are not trying hard, look in neighbour cache.
1072 * The result is tested to ensure that if a specific output
1073 * device/source address was requested, then we honour that
1074 * here
1076 if (!try_hard) {
1077 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
1078 if (neigh) {
1079 if ((oldflp->flowidn_oif &&
1080 (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
1081 (oldflp->saddr &&
1082 (!dn_dev_islocal(neigh->dev,
1083 oldflp->saddr)))) {
1084 neigh_release(neigh);
1085 neigh = NULL;
1086 } else {
1087 if (dev_out)
1088 dev_put(dev_out);
1089 if (dn_dev_islocal(neigh->dev, fld.daddr)) {
1090 dev_out = init_net.loopback_dev;
1091 res.type = RTN_LOCAL;
1092 } else {
1093 dev_out = neigh->dev;
1095 dev_hold(dev_out);
1096 goto select_source;
1101                 /* Not there? Perhaps it's a local address */
1102 if (dev_out == NULL)
1103 dev_out = dn_dev_get_default();
1104 err = -ENODEV;
1105 if (dev_out == NULL)
1106 goto out;
1107 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1108 if (!dn_db)
1109 goto e_inval;
1110 /* Possible improvement - check all devices for local addr */
1111 if (dn_dev_islocal(dev_out, fld.daddr)) {
1112 dev_put(dev_out);
1113 dev_out = init_net.loopback_dev;
1114 dev_hold(dev_out);
1115 res.type = RTN_LOCAL;
1116 goto select_source;
1118 /* Not local either.... try sending it to the default router */
1119 neigh = neigh_clone(dn_db->router);
1120 BUG_ON(neigh && neigh->dev != dev_out);
1122                 /* OK then, we assume it's directly connected and move on */
1123 select_source:
1124 if (neigh)
1125 gateway = ((struct dn_neigh *)neigh)->addr;
1126 if (gateway == 0)
1127 gateway = fld.daddr;
1128 if (fld.saddr == 0) {
1129 fld.saddr = dnet_select_source(dev_out, gateway,
1130 res.type == RTN_LOCAL ?
1131 RT_SCOPE_HOST :
1132 RT_SCOPE_LINK);
1133 if (fld.saddr == 0 && res.type != RTN_LOCAL)
1134 goto e_addr;
1136 fld.flowidn_oif = dev_out->ifindex;
1137 goto make_route;
1139 free_res = 1;
1141 if (res.type == RTN_NAT)
1142 goto e_inval;
1144 if (res.type == RTN_LOCAL) {
1145 if (!fld.saddr)
1146 fld.saddr = fld.daddr;
1147 if (dev_out)
1148 dev_put(dev_out);
1149 dev_out = init_net.loopback_dev;
1150 dev_hold(dev_out);
1151 if (!dev_out->dn_ptr)
1152 goto e_inval;
1153 fld.flowidn_oif = dev_out->ifindex;
1154 if (res.fi)
1155 dn_fib_info_put(res.fi);
1156 res.fi = NULL;
1157 goto make_route;
1160 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1161 dn_fib_select_multipath(&fld, &res);
1164 * We could add some logic to deal with default routes here and
1165 * get rid of some of the special casing above.
1168 if (!fld.saddr)
1169 fld.saddr = DN_FIB_RES_PREFSRC(res);
1171 if (dev_out)
1172 dev_put(dev_out);
1173 dev_out = DN_FIB_RES_DEV(res);
1174 dev_hold(dev_out);
1175 fld.flowidn_oif = dev_out->ifindex;
1176 gateway = DN_FIB_RES_GW(res);
1178 make_route:
1179 if (dev_out->flags & IFF_LOOPBACK)
1180 flags |= RTCF_LOCAL;
1182 rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
1183 if (rt == NULL)
1184 goto e_nobufs;
1186 rt->dn_next = NULL;
1187 memset(&rt->fld, 0, sizeof(rt->fld));
1188 rt->fld.saddr = oldflp->saddr;
1189 rt->fld.daddr = oldflp->daddr;
1190 rt->fld.flowidn_oif = oldflp->flowidn_oif;
1191 rt->fld.flowidn_iif = 0;
1192 rt->fld.flowidn_mark = oldflp->flowidn_mark;
1194 rt->rt_saddr = fld.saddr;
1195 rt->rt_daddr = fld.daddr;
1196 rt->rt_gateway = gateway ? gateway : fld.daddr;
1197 rt->rt_local_src = fld.saddr;
1199 rt->rt_dst_map = fld.daddr;
1200 rt->rt_src_map = fld.saddr;
1202 rt->n = neigh;
1203 neigh = NULL;
1205 rt->dst.lastuse = jiffies;
1206 rt->dst.output = dn_output;
1207 rt->dst.input = dn_rt_bug;
1208 rt->rt_flags = flags;
1209 if (flags & RTCF_LOCAL)
1210 rt->dst.input = dn_nsp_rx;
1212 err = dn_rt_set_next_hop(rt, &res);
1213 if (err)
1214 goto e_neighbour;
1216 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1217 /* dn_insert_route() increments dst->__refcnt */
1218 dn_insert_route(rt, hash, (struct dn_route **)pprt);
1220 done:
1221 if (neigh)
1222 neigh_release(neigh);
1223 if (free_res)
1224 dn_fib_res_put(&res);
1225 if (dev_out)
1226 dev_put(dev_out);
1227 out:
1228 return err;
1230 e_addr:
1231 err = -EADDRNOTAVAIL;
1232 goto done;
1233 e_inval:
1234 err = -EINVAL;
1235 goto done;
1236 e_nobufs:
1237 err = -ENOBUFS;
1238 goto done;
1239 e_neighbour:
1240 dst_release_immediate(&rt->dst);
1241 goto e_nobufs;
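/*
 * Fast path for output routes: look the flow up in the cache, keyed on
 * source, destination, mark and output interface, and fall back to
 * dn_route_output_slow() on a miss.
 */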
1246 * N.B. The flags may be moved into the flowi at some future stage.
1248 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
1250 unsigned int hash = dn_hash(flp->saddr, flp->daddr);
1251 struct dn_route *rt = NULL;
1253 if (!(flags & MSG_TRYHARD)) {
1254 rcu_read_lock_bh();
1255 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1256 rt = rcu_dereference_bh(rt->dn_next)) {
1257 if ((flp->daddr == rt->fld.daddr) &&
1258 (flp->saddr == rt->fld.saddr) &&
1259 (flp->flowidn_mark == rt->fld.flowidn_mark) &&
1260 dn_is_output_route(rt) &&
1261 (rt->fld.flowidn_oif == flp->flowidn_oif)) {
1262 dst_hold_and_use(&rt->dst, jiffies);
1263 rcu_read_unlock_bh();
1264 *pprt = &rt->dst;
1265 return 0;
1268 rcu_read_unlock_bh();
1271 return dn_route_output_slow(pprt, flp, flags);
1274 static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
1276 int err;
1278 err = __dn_route_output_key(pprt, flp, flags);
1279 if (err == 0 && flp->flowidn_proto) {
1280 *pprt = xfrm_lookup(&init_net, *pprt,
1281 flowidn_to_flowi(flp), NULL, 0);
1282 if (IS_ERR(*pprt)) {
1283 err = PTR_ERR(*pprt);
1284 *pprt = NULL;
1287 return err;
1290 int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
1292 int err;
1294 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1295 if (err == 0 && fl->flowidn_proto) {
1296 *pprt = xfrm_lookup(&init_net, *pprt,
1297 flowidn_to_flowi(fl), sk, 0);
1298 if (IS_ERR(*pprt)) {
1299 err = PTR_ERR(*pprt);
1300 *pprt = NULL;
1303 return err;
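/*
 * Slow path for input routes. Zero source addresses and packets that
 * pretend to come from one of our own addresses are rejected. The FIB
 * result then selects local delivery, NAT remapping, or unicast
 * forwarding (with multipath selection and the intra-ethernet redirect
 * hint); local routes also work out which gateway replies should use.
 * The finished route is cached and attached to the skb.
 */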
1306 static int dn_route_input_slow(struct sk_buff *skb)
1308 struct dn_route *rt = NULL;
1309 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1310 struct net_device *in_dev = skb->dev;
1311 struct net_device *out_dev = NULL;
1312 struct dn_dev *dn_db;
1313 struct neighbour *neigh = NULL;
1314 unsigned int hash;
1315 int flags = 0;
1316 __le16 gateway = 0;
1317 __le16 local_src = 0;
1318 struct flowidn fld = {
1319 .daddr = cb->dst,
1320 .saddr = cb->src,
1321 .flowidn_scope = RT_SCOPE_UNIVERSE,
1322 .flowidn_mark = skb->mark,
1323 .flowidn_iif = skb->dev->ifindex,
1325 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
1326 int err = -EINVAL;
1327 int free_res = 0;
1329 dev_hold(in_dev);
1331 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1332 goto out;
1334 /* Zero source addresses are not allowed */
1335 if (fld.saddr == 0)
1336 goto out;
1339 * In this case we've just received a packet from a source
1340 * outside ourselves pretending to come from us. We don't
1341 * allow it any further to prevent routing loops, spoofing and
1342 * other nasties. Loopback packets already have the dst attached
1343 * so this only affects packets which have originated elsewhere.
1345 err = -ENOTUNIQ;
1346 if (dn_dev_islocal(in_dev, cb->src))
1347 goto out;
1349 err = dn_fib_lookup(&fld, &res);
1350 if (err) {
1351 if (err != -ESRCH)
1352 goto out;
1354 * Is the destination us ?
1356 if (!dn_dev_islocal(in_dev, cb->dst))
1357 goto e_inval;
1359 res.type = RTN_LOCAL;
1360 } else {
1361 __le16 src_map = fld.saddr;
1362 free_res = 1;
1364 out_dev = DN_FIB_RES_DEV(res);
1365 if (out_dev == NULL) {
1366 net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
1367 goto e_inval;
1369 dev_hold(out_dev);
1371 if (res.r)
1372 src_map = fld.saddr; /* no NAT support for now */
1374 gateway = DN_FIB_RES_GW(res);
1375 if (res.type == RTN_NAT) {
1376 fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
1377 dn_fib_res_put(&res);
1378 free_res = 0;
1379 if (dn_fib_lookup(&fld, &res))
1380 goto e_inval;
1381 free_res = 1;
1382 if (res.type != RTN_UNICAST)
1383 goto e_inval;
1384 flags |= RTCF_DNAT;
1385 gateway = fld.daddr;
1387 fld.saddr = src_map;
1390 switch(res.type) {
1391 case RTN_UNICAST:
1393                  * Forwarding check here; we only check for forwarding
1394                  * being turned off. If you want to forward only intra
1395                  * area, it's up to you to set the routing tables up
1396                  * correctly.
1398 if (dn_db->parms.forwarding == 0)
1399 goto e_inval;
1401 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1402 dn_fib_select_multipath(&fld, &res);
1405 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1406 * flag as a hint to set the intra-ethernet bit when
1407 * forwarding. If we've got NAT in operation, we don't do
1408 * this optimisation.
1410 if (out_dev == in_dev && !(flags & RTCF_NAT))
1411 flags |= RTCF_DOREDIRECT;
1413 local_src = DN_FIB_RES_PREFSRC(res);
1415 case RTN_BLACKHOLE:
1416 case RTN_UNREACHABLE:
1417 break;
1418 case RTN_LOCAL:
1419 flags |= RTCF_LOCAL;
1420 fld.saddr = cb->dst;
1421 fld.daddr = cb->src;
1423 /* Routing tables gave us a gateway */
1424 if (gateway)
1425 goto make_route;
1427                 /* Packet was intra-ethernet, so we know it's on-link */
1428 if (cb->rt_flags & DN_RT_F_IE) {
1429 gateway = cb->src;
1430 goto make_route;
1433 /* Use the default router if there is one */
1434 neigh = neigh_clone(dn_db->router);
1435 if (neigh) {
1436 gateway = ((struct dn_neigh *)neigh)->addr;
1437 goto make_route;
1440 /* Close eyes and pray */
1441 gateway = cb->src;
1442 goto make_route;
1443 default:
1444 goto e_inval;
1447 make_route:
1448 rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
1449 if (rt == NULL)
1450 goto e_nobufs;
1452 rt->dn_next = NULL;
1453 memset(&rt->fld, 0, sizeof(rt->fld));
1454 rt->rt_saddr = fld.saddr;
1455 rt->rt_daddr = fld.daddr;
1456 rt->rt_gateway = fld.daddr;
1457 if (gateway)
1458 rt->rt_gateway = gateway;
1459 rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
1461 rt->rt_dst_map = fld.daddr;
1462 rt->rt_src_map = fld.saddr;
1464 rt->fld.saddr = cb->src;
1465 rt->fld.daddr = cb->dst;
1466 rt->fld.flowidn_oif = 0;
1467 rt->fld.flowidn_iif = in_dev->ifindex;
1468 rt->fld.flowidn_mark = fld.flowidn_mark;
1470 rt->n = neigh;
1471 rt->dst.lastuse = jiffies;
1472 rt->dst.output = dn_rt_bug_out;
1473 switch (res.type) {
1474 case RTN_UNICAST:
1475 rt->dst.input = dn_forward;
1476 break;
1477 case RTN_LOCAL:
1478 rt->dst.output = dn_output;
1479 rt->dst.input = dn_nsp_rx;
1480 rt->dst.dev = in_dev;
1481 flags |= RTCF_LOCAL;
1482 break;
1483 default:
1484 case RTN_UNREACHABLE:
1485 case RTN_BLACKHOLE:
1486 rt->dst.input = dst_discard;
1488 rt->rt_flags = flags;
1490 err = dn_rt_set_next_hop(rt, &res);
1491 if (err)
1492 goto e_neighbour;
1494 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1495 /* dn_insert_route() increments dst->__refcnt */
1496 dn_insert_route(rt, hash, &rt);
1497 skb_dst_set(skb, &rt->dst);
1499 done:
1500 if (neigh)
1501 neigh_release(neigh);
1502 if (free_res)
1503 dn_fib_res_put(&res);
1504 dev_put(in_dev);
1505 if (out_dev)
1506 dev_put(out_dev);
1507 out:
1508 return err;
1510 e_inval:
1511 err = -EINVAL;
1512 goto done;
1514 e_nobufs:
1515 err = -ENOBUFS;
1516 goto done;
1518 e_neighbour:
1519 dst_release_immediate(&rt->dst);
1520 goto done;
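/*
 * Fast path for input routes: look the packet's flow up in the cache,
 * keyed on source, destination, mark and input interface, and fall
 * back to dn_route_input_slow() on a miss.
 */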
1523 static int dn_route_input(struct sk_buff *skb)
1525 struct dn_route *rt;
1526 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1527 unsigned int hash = dn_hash(cb->src, cb->dst);
1529 if (skb_dst(skb))
1530 return 0;
1532 rcu_read_lock();
1533 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1534 rt = rcu_dereference(rt->dn_next)) {
1535 if ((rt->fld.saddr == cb->src) &&
1536 (rt->fld.daddr == cb->dst) &&
1537 (rt->fld.flowidn_oif == 0) &&
1538 (rt->fld.flowidn_mark == skb->mark) &&
1539 (rt->fld.flowidn_iif == cb->iif)) {
1540 dst_hold_and_use(&rt->dst, jiffies);
1541 rcu_read_unlock();
1542 skb_dst_set(skb, (struct dst_entry *)rt);
1543 return 0;
1546 rcu_read_unlock();
1548 return dn_route_input_slow(skb);
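/*
 * Fill in an RTM_NEWROUTE netlink message describing a cached route,
 * including source/destination, interfaces, gateway, metrics and cache
 * information.
 */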
1551 static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1552 int event, int nowait, unsigned int flags)
1554 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
1555 struct rtmsg *r;
1556 struct nlmsghdr *nlh;
1557 long expires;
1559 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
1560 if (!nlh)
1561 return -EMSGSIZE;
1563 r = nlmsg_data(nlh);
1564 r->rtm_family = AF_DECnet;
1565 r->rtm_dst_len = 16;
1566 r->rtm_src_len = 0;
1567 r->rtm_tos = 0;
1568 r->rtm_table = RT_TABLE_MAIN;
1569 r->rtm_type = rt->rt_type;
1570 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
1571 r->rtm_scope = RT_SCOPE_UNIVERSE;
1572 r->rtm_protocol = RTPROT_UNSPEC;
1574 if (rt->rt_flags & RTCF_NOTIFY)
1575 r->rtm_flags |= RTM_F_NOTIFY;
1577 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
1578 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
1579 goto errout;
1581 if (rt->fld.saddr) {
1582 r->rtm_src_len = 16;
1583 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
1584 goto errout;
1586 if (rt->dst.dev &&
1587 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
1588 goto errout;
1591 * Note to self - change this if input routes reverse direction when
1592 * they deal only with inputs and not with replies like they do
1593 * currently.
1595 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
1596 goto errout;
1598 if (rt->rt_daddr != rt->rt_gateway &&
1599 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
1600 goto errout;
1602 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
1603 goto errout;
1605 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1606 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
1607 rt->dst.error) < 0)
1608 goto errout;
1610 if (dn_is_input_route(rt) &&
1611 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
1612 goto errout;
1614 nlmsg_end(skb, nlh);
1615 return 0;
1617 errout:
1618 nlmsg_cancel(skb, nlh);
1619 return -EMSGSIZE;
1622 const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = {
1623 [RTA_DST] = { .type = NLA_U16 },
1624 [RTA_SRC] = { .type = NLA_U16 },
1625 [RTA_IIF] = { .type = NLA_U32 },
1626 [RTA_OIF] = { .type = NLA_U32 },
1627 [RTA_GATEWAY] = { .type = NLA_U16 },
1628 [RTA_PRIORITY] = { .type = NLA_U32 },
1629 [RTA_PREFSRC] = { .type = NLA_U16 },
1630 [RTA_METRICS] = { .type = NLA_NESTED },
1631 [RTA_MULTIPATH] = { .type = NLA_NESTED },
1632 [RTA_TABLE] = { .type = NLA_U32 },
1633 [RTA_MARK] = { .type = NLA_U32 },
1637 * This is called by both endnodes and routers now.
1639 static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
1640 struct netlink_ext_ack *extack)
1642 struct net *net = sock_net(in_skb->sk);
1643 struct rtmsg *rtm = nlmsg_data(nlh);
1644 struct dn_route *rt = NULL;
1645 struct dn_skb_cb *cb;
1646 int err;
1647 struct sk_buff *skb;
1648 struct flowidn fld;
1649 struct nlattr *tb[RTA_MAX+1];
1651 if (!net_eq(net, &init_net))
1652 return -EINVAL;
1654 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy,
1655 extack);
1656 if (err < 0)
1657 return err;
1659 memset(&fld, 0, sizeof(fld));
1660 fld.flowidn_proto = DNPROTO_NSP;
1662 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1663 if (skb == NULL)
1664 return -ENOBUFS;
1665 skb_reset_mac_header(skb);
1666 cb = DN_SKB_CB(skb);
1668 if (tb[RTA_SRC])
1669 fld.saddr = nla_get_le16(tb[RTA_SRC]);
1671 if (tb[RTA_DST])
1672 fld.daddr = nla_get_le16(tb[RTA_DST]);
1674 if (tb[RTA_IIF])
1675 fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);
1677 if (fld.flowidn_iif) {
1678 struct net_device *dev;
1679 dev = __dev_get_by_index(&init_net, fld.flowidn_iif);
1680 if (!dev || !dev->dn_ptr) {
1681 kfree_skb(skb);
1682 return -ENODEV;
1684 skb->protocol = htons(ETH_P_DNA_RT);
1685 skb->dev = dev;
1686 cb->src = fld.saddr;
1687 cb->dst = fld.daddr;
1688 local_bh_disable();
1689 err = dn_route_input(skb);
1690 local_bh_enable();
1691 memset(cb, 0, sizeof(struct dn_skb_cb));
1692 rt = (struct dn_route *)skb_dst(skb);
1693 if (!err && -rt->dst.error)
1694 err = rt->dst.error;
1695 } else {
1696 if (tb[RTA_OIF])
1697 fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);
1699 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
1702 skb->dev = NULL;
1703 if (err)
1704 goto out_free;
1705 skb_dst_set(skb, &rt->dst);
1706 if (rtm->rtm_flags & RTM_F_NOTIFY)
1707 rt->rt_flags |= RTCF_NOTIFY;
1709 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
1710 if (err < 0) {
1711 err = -EMSGSIZE;
1712 goto out_free;
1715 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
1717 out_free:
1718 kfree_skb(skb);
1719 return err;
1723  * For routers, this is called from dn_fib_dump, but for endnodes it's
1724 * called directly from the rtnetlink dispatch table.
1726 int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1728 struct net *net = sock_net(skb->sk);
1729 struct dn_route *rt;
1730 int h, s_h;
1731 int idx, s_idx;
1732 struct rtmsg *rtm;
1734 if (!net_eq(net, &init_net))
1735 return 0;
1737 if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
1738 return -EINVAL;
1740 rtm = nlmsg_data(cb->nlh);
1741 if (!(rtm->rtm_flags & RTM_F_CLONED))
1742 return 0;
1744 s_h = cb->args[0];
1745 s_idx = idx = cb->args[1];
1746 for(h = 0; h <= dn_rt_hash_mask; h++) {
1747 if (h < s_h)
1748 continue;
1749 if (h > s_h)
1750 s_idx = 0;
1751 rcu_read_lock_bh();
1752 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1754 rt = rcu_dereference_bh(rt->dn_next), idx++) {
1755 if (idx < s_idx)
1756 continue;
1757 skb_dst_set(skb, dst_clone(&rt->dst));
1758 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
1759 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1760 1, NLM_F_MULTI) < 0) {
1761 skb_dst_drop(skb);
1762 rcu_read_unlock_bh();
1763 goto done;
1765 skb_dst_drop(skb);
1767 rcu_read_unlock_bh();
1770 done:
1771 cb->args[0] = h;
1772 cb->args[1] = idx;
1773 return skb->len;
1776 #ifdef CONFIG_PROC_FS
1777 struct dn_rt_cache_iter_state {
1778 int bucket;
1781 static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1783 struct dn_route *rt = NULL;
1784 struct dn_rt_cache_iter_state *s = seq->private;
1786 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1787 rcu_read_lock_bh();
1788 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1789 if (rt)
1790 break;
1791 rcu_read_unlock_bh();
1793 return rt;
1796 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
1798 struct dn_rt_cache_iter_state *s = seq->private;
1800 rt = rcu_dereference_bh(rt->dn_next);
1801 while (!rt) {
1802 rcu_read_unlock_bh();
1803 if (--s->bucket < 0)
1804 break;
1805 rcu_read_lock_bh();
1806 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1808 return rt;
1811 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
1813 struct dn_route *rt = dn_rt_cache_get_first(seq);
1815 if (rt) {
1816 while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
1817 --*pos;
1819 return *pos ? NULL : rt;
1822 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1824 struct dn_route *rt = dn_rt_cache_get_next(seq, v);
1825 ++*pos;
1826 return rt;
1829 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
1831 if (v)
1832 rcu_read_unlock_bh();
1835 static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1837 struct dn_route *rt = v;
1838 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1840 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1841 rt->dst.dev ? rt->dst.dev->name : "*",
1842 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
1843 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
1844 atomic_read(&rt->dst.__refcnt),
1845 rt->dst.__use, 0);
1846 return 0;
1849 static const struct seq_operations dn_rt_cache_seq_ops = {
1850 .start = dn_rt_cache_seq_start,
1851 .next = dn_rt_cache_seq_next,
1852 .stop = dn_rt_cache_seq_stop,
1853 .show = dn_rt_cache_seq_show,
1856 static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
1858 return seq_open_private(file, &dn_rt_cache_seq_ops,
1859 sizeof(struct dn_rt_cache_iter_state));
1862 static const struct file_operations dn_rt_cache_seq_fops = {
1863 .open = dn_rt_cache_seq_open,
1864 .read = seq_read,
1865 .llseek = seq_lseek,
1866 .release = seq_release_private,
1869 #endif /* CONFIG_PROC_FS */
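/*
 * Module init: create the dst slab cache, start the expiry timer and
 * size the route cache hash at roughly one page of buckets per 64MB of
 * memory, rounded down to a power of two and capped at 2048 entries,
 * before registering the /proc file and the rtnetlink RTM_GETROUTE
 * handlers.
 */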
1871 void __init dn_route_init(void)
1873 int i, goal, order;
1875 dn_dst_ops.kmem_cachep =
1876 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
1877 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1878 dst_entries_init(&dn_dst_ops);
1879 timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
1880 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
1881 add_timer(&dn_route_timer);
1883 goal = totalram_pages >> (26 - PAGE_SHIFT);
1885 for(order = 0; (1UL << order) < goal; order++)
1886 /* NOTHING */;
1889 * Only want 1024 entries max, since the table is very, very unlikely
1890 * to be larger than that.
1892 while(order && ((((1UL << order) * PAGE_SIZE) /
1893 sizeof(struct dn_rt_hash_bucket)) >= 2048))
1894 order--;
1896 do {
1897 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
1898 sizeof(struct dn_rt_hash_bucket);
1899 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
1900 dn_rt_hash_mask--;
1901 dn_rt_hash_table = (struct dn_rt_hash_bucket *)
1902 __get_free_pages(GFP_ATOMIC, order);
1903 } while (dn_rt_hash_table == NULL && --order > 0);
1905 if (!dn_rt_hash_table)
1906 panic("Failed to allocate DECnet route cache hash table\n");
1908 printk(KERN_INFO
1909 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
1910 dn_rt_hash_mask,
1911 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
1913 dn_rt_hash_mask--;
1914 for(i = 0; i <= dn_rt_hash_mask; i++) {
1915 spin_lock_init(&dn_rt_hash_table[i].lock);
1916 dn_rt_hash_table[i].chain = NULL;
1919 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
1921 proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
1922 &dn_rt_cache_seq_fops);
1924 #ifdef CONFIG_DECNET_ROUTER
1925 rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
1926 dn_cache_getroute, dn_fib_dump, 0);
1927 #else
1928 rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
1929 dn_cache_getroute, dn_cache_dump, 0);
1930 #endif
1933 void __exit dn_route_cleanup(void)
1935 del_timer(&dn_route_timer);
1936 dn_run_flush(0);
1938 remove_proc_entry("decnet_cache", init_net.proc_net);
1939 dst_entries_destroy(&dn_dst_ops);