net/decnet/dn_route.c
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat  emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
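/*
 * Each hash bucket pairs the head of its route chain with its own
 * spinlock, giving the finer grained per-bucket locking noted in the
 * changelog above.
 */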
struct dn_rt_hash_bucket
{
        struct dn_route *chain;
        spinlock_t lock;
} __attribute__((__aligned__(8)));
extern struct neigh_table dn_neigh_table;

static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(void);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static struct timer_list dn_rt_flush_timer =
                TIMER_INITIALIZER(dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;
static struct dst_ops dn_dst_ops = {
        .family =               PF_DECnet,
        .protocol =             __constant_htons(ETH_P_DNA_RT),
        .gc_thresh =            128,
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
        .negative_advice =      dn_dst_negative_advice,
        .link_failure =         dn_dst_link_failure,
        .update_pmtu =          dn_dst_update_pmtu,
        .entry_size =           sizeof(struct dn_route),
        .entries =              ATOMIC_INIT(0),
};
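/*
 * Illustrative example: dn_hash() below folds the xor of the two 16 bit
 * addresses down into a table index. With src = 0x0401 and dst = 0x0802:
 *
 *      tmp  = 0x0c03
 *      tmp ^= tmp >> 3;        tmp is now 0x0d83
 *      tmp ^= tmp >> 5;        tmp is now 0x0def
 *      tmp ^= tmp >> 10;       tmp is now 0x0dec
 *
 * so with a 64 bucket table (dn_rt_hash_mask == 0x3f) this pair of
 * addresses hashes to bucket 0x2c.
 */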
static __inline__ unsigned dn_hash(unsigned short src, unsigned short dst)
{
        unsigned short tmp = src ^ dst;
        tmp ^= (tmp >> 3);
        tmp ^= (tmp >> 5);
        tmp ^= (tmp >> 10);
        return dn_rt_hash_mask & (unsigned)tmp;
}
/*
 * dnrt_free() frees a route via RCU once readers are done with it;
 * dnrt_drop() additionally releases the reference held by the caller.
 */
static inline void dnrt_free(struct dn_route *rt)
{
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
        if (rt)
                dst_release(&rt->u.dst);
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
static void dn_dst_check_expire(unsigned long dummy)
{
        int i;
        struct dn_route *rt, **rtp;
        unsigned long now = jiffies;
        unsigned long expire = 120 * HZ;

        for(i = 0; i <= dn_rt_hash_mask; i++) {
                rtp = &dn_rt_hash_table[i].chain;

                spin_lock(&dn_rt_hash_table[i].lock);
                while((rt=*rtp) != NULL) {
                        if (atomic_read(&rt->u.dst.__refcnt) ||
                                        (now - rt->u.dst.lastuse) < expire) {
                                rtp = &rt->u.rt_next;
                                continue;
                        }
                        *rtp = rt->u.rt_next;
                        rt->u.rt_next = NULL;
                        dnrt_free(rt);
                }
                spin_unlock(&dn_rt_hash_table[i].lock);

                if ((jiffies - now) > 0)
                        break;
        }

        mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}
static int dn_dst_gc(void)
{
        struct dn_route *rt, **rtp;
        int i;
        unsigned long now = jiffies;
        unsigned long expire = 10 * HZ;

        for(i = 0; i <= dn_rt_hash_mask; i++) {

                spin_lock_bh(&dn_rt_hash_table[i].lock);
                rtp = &dn_rt_hash_table[i].chain;

                while((rt=*rtp) != NULL) {
                        if (atomic_read(&rt->u.dst.__refcnt) ||
                                        (now - rt->u.dst.lastuse) < expire) {
                                rtp = &rt->u.rt_next;
                                continue;
                        }
                        *rtp = rt->u.rt_next;
                        rt->u.rt_next = NULL;
                        dnrt_drop(rt);
                        break;
                }
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
        }

        return 0;
}
/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
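/*
 * For example, with long headers (or no neighbour) the minimum
 * acceptable mtu works out as 230 - 21 = 209 bytes, and with short
 * headers as 230 - 6 = 224 bytes. A smaller reported mtu is ignored;
 * an acceptable one is written into the route metrics below and the
 * advertised mss is clamped to mtu - DN_MAX_NSP_DATA_HEADER.
 */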
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
        u32 min_mtu = 230;
        struct dn_dev *dn = dst->neighbour ?
                            (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;

        if (dn && dn->use_long == 0)
                min_mtu -= 6;
        else
                min_mtu -= 21;

        if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= min_mtu) {
                if (!(dst_metric_locked(dst, RTAX_MTU))) {
                        dst->metrics[RTAX_MTU-1] = mtu;
                        dst_set_expires(dst, dn_rt_mtu_expires);
                }
                if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
                        u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
                        if (dst->metrics[RTAX_ADVMSS-1] > mss)
                                dst->metrics[RTAX_ADVMSS-1] = mss;
                }
        }
}
/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
        return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
        dst_release(dst);
        return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
        return;
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
        return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
                fl1->oif == fl2->oif &&
                fl1->iif == fl2->iif;
}
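/*
 * Insert a route into the hash table. If an entry with the same flow
 * key already exists, that entry is promoted to the head of its chain
 * and returned instead, and the newly built route is dropped.
 */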
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
        struct dn_route *rth, **rthp;
        unsigned long now = jiffies;

        rthp = &dn_rt_hash_table[hash].chain;

        spin_lock_bh(&dn_rt_hash_table[hash].lock);
        while((rth = *rthp) != NULL) {
                if (compare_keys(&rth->fl, &rt->fl)) {
                        /* Put it first */
                        *rthp = rth->u.rt_next;
                        rcu_assign_pointer(rth->u.rt_next,
                                           dn_rt_hash_table[hash].chain);
                        rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

                        rth->u.dst.__use++;
                        dst_hold(&rth->u.dst);
                        rth->u.dst.lastuse = now;
                        spin_unlock_bh(&dn_rt_hash_table[hash].lock);

                        dnrt_drop(rt);
                        *rp = rth;
                        return 0;
                }
                rthp = &rth->u.rt_next;
        }

        rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
        rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

        dst_hold(&rt->u.dst);
        rt->u.dst.__use++;
        rt->u.dst.lastuse = now;
        spin_unlock_bh(&dn_rt_hash_table[hash].lock);
        *rp = rt;
        return 0;
}
void dn_run_flush(unsigned long dummy)
{
        int i;
        struct dn_route *rt, *next;

        for(i = 0; i < dn_rt_hash_mask; i++) {
                spin_lock_bh(&dn_rt_hash_table[i].lock);

                if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
                        goto nothing_to_declare;

                for(; rt; rt=next) {
                        next = rt->u.rt_next;
                        rt->u.rt_next = NULL;
                        dst_free((struct dst_entry *)rt);
                }

nothing_to_declare:
                spin_unlock_bh(&dn_rt_hash_table[i].lock);
        }
}
static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
        unsigned long now = jiffies;
        int user_mode = !in_interrupt();

        if (delay < 0)
                delay = dn_rt_min_delay;

        spin_lock_bh(&dn_rt_flush_lock);

        if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
                long tmo = (long)(dn_rt_deadline - now);

                if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
                        tmo = 0;

                if (delay > tmo)
                        delay = tmo;
        }

        if (delay <= 0) {
                spin_unlock_bh(&dn_rt_flush_lock);
                dn_run_flush(0);
                return;
        }

        if (dn_rt_deadline == 0)
                dn_rt_deadline = now + dn_rt_max_delay;

        dn_rt_flush_timer.expires = now + delay;
        add_timer(&dn_rt_flush_timer);
        spin_unlock_bh(&dn_rt_flush_lock);
}
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 */
static int dn_return_short(struct sk_buff *skb)
{
        struct dn_skb_cb *cb;
        unsigned char *ptr;
        dn_address *src;
        dn_address *dst;
        dn_address tmp;

        /* Add back headers */
        skb_push(skb, skb->data - skb->nh.raw);

        if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
                return NET_RX_DROP;

        cb = DN_SKB_CB(skb);
        /* Skip packet length and point to flags */
        ptr = skb->data + 2;
        *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

        dst = (dn_address *)ptr;
        ptr += 2;
        src = (dn_address *)ptr;
        ptr += 2;
        *ptr = 0; /* Zero hop count */

        /* Swap source and destination */
        tmp = *src;
        *src = *dst;
        *dst = tmp;

        skb->pkt_type = PACKET_OUTGOING;
        dn_rt_finish_output(skb, NULL, NULL);
        return NET_RX_SUCCESS;
}
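/*
 * dn_return_short() above and dn_return_long() below both work the same
 * way: the return-request (RQR) flag is replaced by return-to-sender
 * (RTS), the source and destination addresses are swapped, the hop
 * count is reset and the packet is handed back to the output path as
 * PACKET_OUTGOING via dn_rt_finish_output().
 */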
415 * dn_return_long - Return a long packet to its sender
416 * @skb: The long format packet to return
419 static int dn_return_long(struct sk_buff *skb)
421 struct dn_skb_cb *cb;
422 unsigned char *ptr;
423 unsigned char *src_addr, *dst_addr;
424 unsigned char tmp[ETH_ALEN];
426 /* Add back all headers */
427 skb_push(skb, skb->data - skb->nh.raw);
429 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
430 return NET_RX_DROP;
432 cb = DN_SKB_CB(skb);
433 /* Ignore packet length and point to flags */
434 ptr = skb->data + 2;
436 /* Skip padding */
437 if (*ptr & DN_RT_F_PF) {
438 char padlen = (*ptr & ~DN_RT_F_PF);
439 ptr += padlen;
442 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
443 ptr += 2;
444 dst_addr = ptr;
445 ptr += 8;
446 src_addr = ptr;
447 ptr += 6;
448 *ptr = 0; /* Zero hop count */
450 /* Swap source and destination */
451 memcpy(tmp, src_addr, ETH_ALEN);
452 memcpy(src_addr, dst_addr, ETH_ALEN);
453 memcpy(dst_addr, tmp, ETH_ALEN);
455 skb->pkt_type = PACKET_OUTGOING;
456 dn_rt_finish_output(skb, dst_addr, src_addr);
457 return NET_RX_SUCCESS;
461 * dn_route_rx_packet - Try and find a route for an incoming packet
462 * @skb: The packet to find a route for
464 * Returns: result of input function if route is found, error code otherwise
466 static int dn_route_rx_packet(struct sk_buff *skb)
468 struct dn_skb_cb *cb = DN_SKB_CB(skb);
469 int err;
471 if ((err = dn_route_input(skb)) == 0)
472 return dst_input(skb);
474 if (decnet_debug_level & 4) {
475 char *devname = skb->dev ? skb->dev->name : "???";
476 struct dn_skb_cb *cb = DN_SKB_CB(skb);
477 printk(KERN_DEBUG
478 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
479 (int)cb->rt_flags, devname, skb->len, cb->src, cb->dst,
480 err, skb->pkt_type);
483 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
484 switch(cb->rt_flags & DN_RT_PKT_MSK) {
485 case DN_RT_PKT_SHORT:
486 return dn_return_short(skb);
487 case DN_RT_PKT_LONG:
488 return dn_return_long(skb);
492 kfree_skb(skb);
493 return NET_RX_DROP;
496 static int dn_route_rx_long(struct sk_buff *skb)
498 struct dn_skb_cb *cb = DN_SKB_CB(skb);
499 unsigned char *ptr = skb->data;
501 if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
502 goto drop_it;
504 skb_pull(skb, 20);
505 skb->h.raw = skb->data;
507 /* Destination info */
508 ptr += 2;
509 cb->dst = dn_htons(dn_eth2dn(ptr));
510 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
511 goto drop_it;
512 ptr += 6;
515 /* Source info */
516 ptr += 2;
517 cb->src = dn_htons(dn_eth2dn(ptr));
518 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
519 goto drop_it;
520 ptr += 6;
521 /* Other junk */
522 ptr++;
523 cb->hops = *ptr++; /* Visit Count */
525 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
527 drop_it:
528 kfree_skb(skb);
529 return NET_RX_DROP;
534 static int dn_route_rx_short(struct sk_buff *skb)
536 struct dn_skb_cb *cb = DN_SKB_CB(skb);
537 unsigned char *ptr = skb->data;
539 if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
540 goto drop_it;
542 skb_pull(skb, 5);
543 skb->h.raw = skb->data;
545 cb->dst = *(dn_address *)ptr;
546 ptr += 2;
547 cb->src = *(dn_address *)ptr;
548 ptr += 2;
549 cb->hops = *ptr & 0x3f;
551 return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
553 drop_it:
554 kfree_skb(skb);
555 return NET_RX_DROP;
558 static int dn_route_discard(struct sk_buff *skb)
561 * I know we drop the packet here, but thats considered success in
562 * this case
564 kfree_skb(skb);
565 return NET_RX_SUCCESS;
568 static int dn_route_ptp_hello(struct sk_buff *skb)
570 dn_dev_hello(skb);
571 dn_neigh_pointopoint_hello(skb);
572 return NET_RX_SUCCESS;
575 int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
577 struct dn_skb_cb *cb;
578 unsigned char flags = 0;
579 __u16 len = dn_ntohs(*(__u16 *)skb->data);
580 struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
581 unsigned char padlen = 0;
583 if (dn == NULL)
584 goto dump_it;
586 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
587 goto out;
589 if (!pskb_may_pull(skb, 3))
590 goto dump_it;
592 skb_pull(skb, 2);
594 if (len > skb->len)
595 goto dump_it;
597 skb_trim(skb, len);
599 flags = *skb->data;
601 cb = DN_SKB_CB(skb);
602 cb->stamp = jiffies;
603 cb->iif = dev->ifindex;
606 * If we have padding, remove it.
608 if (flags & DN_RT_F_PF) {
609 padlen = flags & ~DN_RT_F_PF;
610 if (!pskb_may_pull(skb, padlen + 1))
611 goto dump_it;
612 skb_pull(skb, padlen);
613 flags = *skb->data;
616 skb->nh.raw = skb->data;
619 * Weed out future version DECnet
621 if (flags & DN_RT_F_VER)
622 goto dump_it;
624 cb->rt_flags = flags;
626 if (decnet_debug_level & 1)
627 printk(KERN_DEBUG
628 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
629 (int)flags, (dev) ? dev->name : "???", len, skb->len,
630 padlen);
632 if (flags & DN_RT_PKT_CNTL) {
633 if (unlikely(skb_is_nonlinear(skb)) &&
634 skb_linearize(skb, GFP_ATOMIC) != 0)
635 goto dump_it;
637 switch(flags & DN_RT_CNTL_MSK) {
638 case DN_RT_PKT_INIT:
639 dn_dev_init_pkt(skb);
640 break;
641 case DN_RT_PKT_VERI:
642 dn_dev_veri_pkt(skb);
643 break;
646 if (dn->parms.state != DN_DEV_S_RU)
647 goto dump_it;
649 switch(flags & DN_RT_CNTL_MSK) {
650 case DN_RT_PKT_HELO:
651 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
653 case DN_RT_PKT_L1RT:
654 case DN_RT_PKT_L2RT:
655 return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
656 case DN_RT_PKT_ERTH:
657 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
659 case DN_RT_PKT_EEDH:
660 return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
662 } else {
663 if (dn->parms.state != DN_DEV_S_RU)
664 goto dump_it;
666 skb_pull(skb, 1); /* Pull flags */
668 switch(flags & DN_RT_PKT_MSK) {
669 case DN_RT_PKT_LONG:
670 return dn_route_rx_long(skb);
671 case DN_RT_PKT_SHORT:
672 return dn_route_rx_short(skb);
676 dump_it:
677 kfree_skb(skb);
678 out:
679 return NET_RX_DROP;
682 static int dn_output(struct sk_buff *skb)
684 struct dst_entry *dst = skb->dst;
685 struct dn_route *rt = (struct dn_route *)dst;
686 struct net_device *dev = dst->dev;
687 struct dn_skb_cb *cb = DN_SKB_CB(skb);
688 struct neighbour *neigh;
690 int err = -EINVAL;
692 if ((neigh = dst->neighbour) == NULL)
693 goto error;
695 skb->dev = dev;
697 cb->src = rt->rt_saddr;
698 cb->dst = rt->rt_daddr;
701 * Always set the Intra-Ethernet bit on all outgoing packets
702 * originated on this node. Only valid flag from upper layers
703 * is return-to-sender-requested. Set hop count to 0 too.
705 cb->rt_flags &= ~DN_RT_F_RQR;
706 cb->rt_flags |= DN_RT_F_IE;
707 cb->hops = 0;
709 return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);
711 error:
712 if (net_ratelimit())
713 printk(KERN_DEBUG "dn_output: This should not happen\n");
715 kfree_skb(skb);
717 return err;
720 static int dn_forward(struct sk_buff *skb)
722 struct dn_skb_cb *cb = DN_SKB_CB(skb);
723 struct dst_entry *dst = skb->dst;
724 struct dn_dev *dn_db = dst->dev->dn_ptr;
725 struct dn_route *rt;
726 struct neighbour *neigh = dst->neighbour;
727 int header_len;
728 #ifdef CONFIG_NETFILTER
729 struct net_device *dev = skb->dev;
730 #endif
732 if (skb->pkt_type != PACKET_HOST)
733 goto drop;
735 /* Ensure that we have enough space for headers */
736 rt = (struct dn_route *)skb->dst;
737 header_len = dn_db->use_long ? 21 : 6;
738 if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
739 goto drop;
742 * Hop count exceeded.
744 if (++cb->hops > 30)
745 goto drop;
747 skb->dev = rt->u.dst.dev;
750 * If packet goes out same interface it came in on, then set
751 * the Intra-Ethernet bit. This has no effect for short
752 * packets, so we don't need to test for them here.
754 cb->rt_flags &= ~DN_RT_F_IE;
755 if (rt->rt_flags & RTCF_DOREDIRECT)
756 cb->rt_flags |= DN_RT_F_IE;
758 return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);
760 drop:
761 kfree_skb(skb);
762 return NET_RX_DROP;
766 * Drop packet. This is used for endnodes and for
767 * when we should not be forwarding packets from
768 * this dest.
770 static int dn_blackhole(struct sk_buff *skb)
772 kfree_skb(skb);
773 return NET_RX_DROP;
777 * Used to catch bugs. This should never normally get
778 * called.
780 static int dn_rt_bug(struct sk_buff *skb)
782 if (net_ratelimit()) {
783 struct dn_skb_cb *cb = DN_SKB_CB(skb);
785 printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
786 cb->src, cb->dst);
789 kfree_skb(skb);
791 return NET_RX_BAD;
794 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
796 struct dn_fib_info *fi = res->fi;
797 struct net_device *dev = rt->u.dst.dev;
798 struct neighbour *n;
799 unsigned mss;
801 if (fi) {
802 if (DN_FIB_RES_GW(*res) &&
803 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
804 rt->rt_gateway = DN_FIB_RES_GW(*res);
805 memcpy(rt->u.dst.metrics, fi->fib_metrics,
806 sizeof(rt->u.dst.metrics));
808 rt->rt_type = res->type;
810 if (dev != NULL && rt->u.dst.neighbour == NULL) {
811 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
812 if (IS_ERR(n))
813 return PTR_ERR(n);
814 rt->u.dst.neighbour = n;
817 if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
818 rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
819 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
820 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
821 if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
822 rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
823 rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
824 return 0;
static inline int dn_match_addr(__u16 addr1, __u16 addr2)
{
        __u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2);
        int match = 16;
        while(tmp) {
                tmp >>= 1;
                match--;
        }
        return match;
}
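/*
 * dn_match_addr() above returns the number of leading bits the two
 * addresses have in common, e.g. two addresses whose host-order values
 * differ only in the bottom two bits score 14. dnet_select_source()
 * below uses it when picking a local source address for a destination.
 */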
838 static __u16 dnet_select_source(const struct net_device *dev, __u16 daddr, int scope)
840 __u16 saddr = 0;
841 struct dn_dev *dn_db = dev->dn_ptr;
842 struct dn_ifaddr *ifa;
843 int best_match = 0;
844 int ret;
846 read_lock(&dev_base_lock);
847 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
848 if (ifa->ifa_scope > scope)
849 continue;
850 if (!daddr) {
851 saddr = ifa->ifa_local;
852 break;
854 ret = dn_match_addr(daddr, ifa->ifa_local);
855 if (ret > best_match)
856 saddr = ifa->ifa_local;
857 if (best_match == 0)
858 saddr = ifa->ifa_local;
860 read_unlock(&dev_base_lock);
862 return saddr;
865 static inline __u16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
867 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
870 static inline __u16 dn_fib_rules_map_destination(__u16 daddr, struct dn_fib_res *res)
872 __u16 mask = dnet_make_mask(res->prefixlen);
873 return (daddr&~mask)|res->fi->fib_nh->nh_gw;
876 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
878 struct flowi fl = { .nl_u = { .dn_u =
879 { .daddr = oldflp->fld_dst,
880 .saddr = oldflp->fld_src,
881 .scope = RT_SCOPE_UNIVERSE,
882 #ifdef CONFIG_DECNET_ROUTE_FWMARK
883 .fwmark = oldflp->fld_fwmark
884 #endif
885 } },
886 .iif = loopback_dev.ifindex,
887 .oif = oldflp->oif };
888 struct dn_route *rt = NULL;
889 struct net_device *dev_out = NULL;
890 struct neighbour *neigh = NULL;
891 unsigned hash;
892 unsigned flags = 0;
893 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
894 int err;
895 int free_res = 0;
896 __u16 gateway = 0;
898 if (decnet_debug_level & 16)
899 printk(KERN_DEBUG
900 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
901 " iif=%d oif=%d\n", oldflp->fld_dst, oldflp->fld_src,
902 oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);
904 /* If we have an output interface, verify its a DECnet device */
905 if (oldflp->oif) {
906 dev_out = dev_get_by_index(oldflp->oif);
907 err = -ENODEV;
908 if (dev_out && dev_out->dn_ptr == NULL) {
909 dev_put(dev_out);
910 dev_out = NULL;
912 if (dev_out == NULL)
913 goto out;
916 /* If we have a source address, verify that its a local address */
917 if (oldflp->fld_src) {
918 err = -EADDRNOTAVAIL;
920 if (dev_out) {
921 if (dn_dev_islocal(dev_out, oldflp->fld_src))
922 goto source_ok;
923 dev_put(dev_out);
924 goto out;
926 read_lock(&dev_base_lock);
927 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
928 if (!dev_out->dn_ptr)
929 continue;
930 if (dn_dev_islocal(dev_out, oldflp->fld_src))
931 break;
933 read_unlock(&dev_base_lock);
934 if (dev_out == NULL)
935 goto out;
936 dev_hold(dev_out);
937 source_ok:
941 /* No destination? Assume its local */
942 if (!fl.fld_dst) {
943 fl.fld_dst = fl.fld_src;
945 err = -EADDRNOTAVAIL;
946 if (dev_out)
947 dev_put(dev_out);
948 dev_out = &loopback_dev;
949 dev_hold(dev_out);
950 if (!fl.fld_dst) {
951 fl.fld_dst =
952 fl.fld_src = dnet_select_source(dev_out, 0,
953 RT_SCOPE_HOST);
954 if (!fl.fld_dst)
955 goto out;
957 fl.oif = loopback_dev.ifindex;
958 res.type = RTN_LOCAL;
959 goto make_route;
        if (decnet_debug_level & 16)
                printk(KERN_DEBUG
                       "dn_route_output_slow: initial checks complete."
                       " dst=%04x src=%04x oif=%d try_hard=%d\n", fl.fld_dst,
                       fl.fld_src, fl.oif, try_hard);

        /*
         * N.B. If the kernel is compiled without router support then
         * dn_fib_lookup() will evaluate to non-zero so this if () block
         * will always be executed.
         */
        err = -ESRCH;
        if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
                struct dn_dev *dn_db;
                if (err != -ESRCH)
                        goto out;
                /*
                 * Here the fallback is basically the standard algorithm for
                 * routing in endnodes which is described in the DECnet routing
                 * docs
                 *
                 * If we are not trying hard, look in neighbour cache.
                 * The result is tested to ensure that if a specific output
                 * device/source address was requested, then we honour that
                 * here
                 */
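                /*
                 * In outline the fallback is: consult the neighbour cache
                 * (unless try_hard), then the default device, then check
                 * for a local address, then the default router, and
                 * finally assume the destination is directly connected.
                 */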
988 if (!try_hard) {
989 neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
990 if (neigh) {
991 if ((oldflp->oif &&
992 (neigh->dev->ifindex != oldflp->oif)) ||
993 (oldflp->fld_src &&
994 (!dn_dev_islocal(neigh->dev,
995 oldflp->fld_src)))) {
996 neigh_release(neigh);
997 neigh = NULL;
998 } else {
999 if (dev_out)
1000 dev_put(dev_out);
1001 if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
1002 dev_out = &loopback_dev;
1003 res.type = RTN_LOCAL;
1004 } else {
1005 dev_out = neigh->dev;
1007 dev_hold(dev_out);
1008 goto select_source;
1013 /* Not there? Perhaps its a local address */
1014 if (dev_out == NULL)
1015 dev_out = dn_dev_get_default();
1016 err = -ENODEV;
1017 if (dev_out == NULL)
1018 goto out;
1019 dn_db = dev_out->dn_ptr;
1020 /* Possible improvement - check all devices for local addr */
1021 if (dn_dev_islocal(dev_out, fl.fld_dst)) {
1022 dev_put(dev_out);
1023 dev_out = &loopback_dev;
1024 dev_hold(dev_out);
1025 res.type = RTN_LOCAL;
1026 goto select_source;
1028 /* Not local either.... try sending it to the default router */
1029 neigh = neigh_clone(dn_db->router);
1030 BUG_ON(neigh && neigh->dev != dev_out);
1032 /* Ok then, we assume its directly connected and move on */
1033 select_source:
1034 if (neigh)
1035 gateway = ((struct dn_neigh *)neigh)->addr;
1036 if (gateway == 0)
1037 gateway = fl.fld_dst;
1038 if (fl.fld_src == 0) {
1039 fl.fld_src = dnet_select_source(dev_out, gateway,
1040 res.type == RTN_LOCAL ?
1041 RT_SCOPE_HOST :
1042 RT_SCOPE_LINK);
1043 if (fl.fld_src == 0 && res.type != RTN_LOCAL)
1044 goto e_addr;
1046 fl.oif = dev_out->ifindex;
1047 goto make_route;
1049 free_res = 1;
1051 if (res.type == RTN_NAT)
1052 goto e_inval;
1054 if (res.type == RTN_LOCAL) {
1055 if (!fl.fld_src)
1056 fl.fld_src = fl.fld_dst;
1057 if (dev_out)
1058 dev_put(dev_out);
1059 dev_out = &loopback_dev;
1060 dev_hold(dev_out);
1061 fl.oif = dev_out->ifindex;
1062 if (res.fi)
1063 dn_fib_info_put(res.fi);
1064 res.fi = NULL;
1065 goto make_route;
1068 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1069 dn_fib_select_multipath(&fl, &res);
1072 * We could add some logic to deal with default routes here and
1073 * get rid of some of the special casing above.
1076 if (!fl.fld_src)
1077 fl.fld_src = DN_FIB_RES_PREFSRC(res);
1079 if (dev_out)
1080 dev_put(dev_out);
1081 dev_out = DN_FIB_RES_DEV(res);
1082 dev_hold(dev_out);
1083 fl.oif = dev_out->ifindex;
1084 gateway = DN_FIB_RES_GW(res);
1086 make_route:
1087 if (dev_out->flags & IFF_LOOPBACK)
1088 flags |= RTCF_LOCAL;
1090 rt = dst_alloc(&dn_dst_ops);
1091 if (rt == NULL)
1092 goto e_nobufs;
1094 atomic_set(&rt->u.dst.__refcnt, 1);
1095 rt->u.dst.flags = DST_HOST;
1097 rt->fl.fld_src = oldflp->fld_src;
1098 rt->fl.fld_dst = oldflp->fld_dst;
1099 rt->fl.oif = oldflp->oif;
1100 rt->fl.iif = 0;
1101 #ifdef CONFIG_DECNET_ROUTE_FWMARK
1102 rt->fl.fld_fwmark = oldflp->fld_fwmark;
1103 #endif
1105 rt->rt_saddr = fl.fld_src;
1106 rt->rt_daddr = fl.fld_dst;
1107 rt->rt_gateway = gateway ? gateway : fl.fld_dst;
1108 rt->rt_local_src = fl.fld_src;
1110 rt->rt_dst_map = fl.fld_dst;
1111 rt->rt_src_map = fl.fld_src;
1113 rt->u.dst.dev = dev_out;
1114 dev_hold(dev_out);
1115 rt->u.dst.neighbour = neigh;
1116 neigh = NULL;
1118 rt->u.dst.lastuse = jiffies;
1119 rt->u.dst.output = dn_output;
1120 rt->u.dst.input = dn_rt_bug;
1121 rt->rt_flags = flags;
1122 if (flags & RTCF_LOCAL)
1123 rt->u.dst.input = dn_nsp_rx;
1125 err = dn_rt_set_next_hop(rt, &res);
1126 if (err)
1127 goto e_neighbour;
1129 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1130 dn_insert_route(rt, hash, (struct dn_route **)pprt);
1132 done:
1133 if (neigh)
1134 neigh_release(neigh);
1135 if (free_res)
1136 dn_fib_res_put(&res);
1137 if (dev_out)
1138 dev_put(dev_out);
1139 out:
1140 return err;
1142 e_addr:
1143 err = -EADDRNOTAVAIL;
1144 goto done;
1145 e_inval:
1146 err = -EINVAL;
1147 goto done;
1148 e_nobufs:
1149 err = -ENOBUFS;
1150 goto done;
1151 e_neighbour:
1152 dst_free(&rt->u.dst);
1153 goto e_nobufs;
1158 * N.B. The flags may be moved into the flowi at some future stage.
1160 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
1162 unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
1163 struct dn_route *rt = NULL;
1165 if (!(flags & MSG_TRYHARD)) {
1166 rcu_read_lock_bh();
1167 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
1168 rt = rcu_dereference(rt->u.rt_next)) {
1169 if ((flp->fld_dst == rt->fl.fld_dst) &&
1170 (flp->fld_src == rt->fl.fld_src) &&
1171 #ifdef CONFIG_DECNET_ROUTE_FWMARK
1172 (flp->fld_fwmark == rt->fl.fld_fwmark) &&
1173 #endif
1174 (rt->fl.iif == 0) &&
1175 (rt->fl.oif == flp->oif)) {
1176 rt->u.dst.lastuse = jiffies;
1177 dst_hold(&rt->u.dst);
1178 rt->u.dst.__use++;
1179 rcu_read_unlock_bh();
1180 *pprt = &rt->u.dst;
1181 return 0;
1184 rcu_read_unlock_bh();
1187 return dn_route_output_slow(pprt, flp, flags);
1190 static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
1192 int err;
1194 err = __dn_route_output_key(pprt, flp, flags);
1195 if (err == 0 && flp->proto) {
1196 err = xfrm_lookup(pprt, flp, NULL, 0);
1198 return err;
1201 int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
1203 int err;
1205 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1206 if (err == 0 && fl->proto) {
1207 err = xfrm_lookup(pprt, fl, sk, !(flags & MSG_DONTWAIT));
1209 return err;
1212 static int dn_route_input_slow(struct sk_buff *skb)
1214 struct dn_route *rt = NULL;
1215 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1216 struct net_device *in_dev = skb->dev;
1217 struct net_device *out_dev = NULL;
1218 struct dn_dev *dn_db;
1219 struct neighbour *neigh = NULL;
1220 unsigned hash;
1221 int flags = 0;
1222 __u16 gateway = 0;
1223 __u16 local_src = 0;
1224 struct flowi fl = { .nl_u = { .dn_u =
1225 { .daddr = cb->dst,
1226 .saddr = cb->src,
1227 .scope = RT_SCOPE_UNIVERSE,
1228 #ifdef CONFIG_DECNET_ROUTE_FWMARK
1229 .fwmark = skb->nfmark
1230 #endif
1231 } },
1232 .iif = skb->dev->ifindex };
1233 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
1234 int err = -EINVAL;
1235 int free_res = 0;
1237 dev_hold(in_dev);
1239 if ((dn_db = in_dev->dn_ptr) == NULL)
1240 goto out;
1242 /* Zero source addresses are not allowed */
1243 if (fl.fld_src == 0)
1244 goto out;
1247 * In this case we've just received a packet from a source
1248 * outside ourselves pretending to come from us. We don't
1249 * allow it any further to prevent routing loops, spoofing and
1250 * other nasties. Loopback packets already have the dst attached
1251 * so this only affects packets which have originated elsewhere.
1253 err = -ENOTUNIQ;
1254 if (dn_dev_islocal(in_dev, cb->src))
1255 goto out;
1257 err = dn_fib_lookup(&fl, &res);
1258 if (err) {
1259 if (err != -ESRCH)
1260 goto out;
1262 * Is the destination us ?
1264 if (!dn_dev_islocal(in_dev, cb->dst))
1265 goto e_inval;
1267 res.type = RTN_LOCAL;
1268 flags |= RTCF_DIRECTSRC;
1269 } else {
1270 __u16 src_map = fl.fld_src;
1271 free_res = 1;
1273 out_dev = DN_FIB_RES_DEV(res);
1274 if (out_dev == NULL) {
1275 if (net_ratelimit())
1276 printk(KERN_CRIT "Bug in dn_route_input_slow() "
1277 "No output device\n");
1278 goto e_inval;
1280 dev_hold(out_dev);
1282 if (res.r)
1283 src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);
1285 gateway = DN_FIB_RES_GW(res);
1286 if (res.type == RTN_NAT) {
1287 fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
1288 dn_fib_res_put(&res);
1289 free_res = 0;
1290 if (dn_fib_lookup(&fl, &res))
1291 goto e_inval;
1292 free_res = 1;
1293 if (res.type != RTN_UNICAST)
1294 goto e_inval;
1295 flags |= RTCF_DNAT;
1296 gateway = fl.fld_dst;
1298 fl.fld_src = src_map;
1301 switch(res.type) {
1302 case RTN_UNICAST:
1304 * Forwarding check here, we only check for forwarding
1305 * being turned off, if you want to only forward intra
1306 * area, its up to you to set the routing tables up
1307 * correctly.
1309 if (dn_db->parms.forwarding == 0)
1310 goto e_inval;
1312 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1313 dn_fib_select_multipath(&fl, &res);
1316 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1317 * flag as a hint to set the intra-ethernet bit when
1318 * forwarding. If we've got NAT in operation, we don't do
1319 * this optimisation.
1321 if (out_dev == in_dev && !(flags & RTCF_NAT))
1322 flags |= RTCF_DOREDIRECT;
1324 local_src = DN_FIB_RES_PREFSRC(res);
1326 case RTN_BLACKHOLE:
1327 case RTN_UNREACHABLE:
1328 break;
1329 case RTN_LOCAL:
1330 flags |= RTCF_LOCAL;
1331 fl.fld_src = cb->dst;
1332 fl.fld_dst = cb->src;
1334 /* Routing tables gave us a gateway */
1335 if (gateway)
1336 goto make_route;
                /* Packet was intra-ethernet, so we know its on-link */
                if (cb->rt_flags & DN_RT_F_IE) {
                        gateway = cb->src;
                        flags |= RTCF_DIRECTSRC;
                        goto make_route;
                }
1345 /* Use the default router if there is one */
1346 neigh = neigh_clone(dn_db->router);
1347 if (neigh) {
1348 gateway = ((struct dn_neigh *)neigh)->addr;
1349 goto make_route;
1352 /* Close eyes and pray */
1353 gateway = cb->src;
1354 flags |= RTCF_DIRECTSRC;
1355 goto make_route;
1356 default:
1357 goto e_inval;
1360 make_route:
1361 rt = dst_alloc(&dn_dst_ops);
1362 if (rt == NULL)
1363 goto e_nobufs;
1365 rt->rt_saddr = fl.fld_src;
1366 rt->rt_daddr = fl.fld_dst;
1367 rt->rt_gateway = fl.fld_dst;
1368 if (gateway)
1369 rt->rt_gateway = gateway;
1370 rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
1372 rt->rt_dst_map = fl.fld_dst;
1373 rt->rt_src_map = fl.fld_src;
1375 rt->fl.fld_src = cb->src;
1376 rt->fl.fld_dst = cb->dst;
1377 rt->fl.oif = 0;
1378 rt->fl.iif = in_dev->ifindex;
1379 rt->fl.fld_fwmark = fl.fld_fwmark;
1381 rt->u.dst.flags = DST_HOST;
1382 rt->u.dst.neighbour = neigh;
1383 rt->u.dst.dev = out_dev;
1384 rt->u.dst.lastuse = jiffies;
1385 rt->u.dst.output = dn_rt_bug;
1386 switch(res.type) {
1387 case RTN_UNICAST:
1388 rt->u.dst.input = dn_forward;
1389 break;
1390 case RTN_LOCAL:
1391 rt->u.dst.output = dn_output;
1392 rt->u.dst.input = dn_nsp_rx;
1393 rt->u.dst.dev = in_dev;
1394 flags |= RTCF_LOCAL;
1395 break;
1396 default:
1397 case RTN_UNREACHABLE:
1398 case RTN_BLACKHOLE:
1399 rt->u.dst.input = dn_blackhole;
1401 rt->rt_flags = flags;
1402 if (rt->u.dst.dev)
1403 dev_hold(rt->u.dst.dev);
1405 err = dn_rt_set_next_hop(rt, &res);
1406 if (err)
1407 goto e_neighbour;
1409 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1410 dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);
1412 done:
1413 if (neigh)
1414 neigh_release(neigh);
1415 if (free_res)
1416 dn_fib_res_put(&res);
1417 dev_put(in_dev);
1418 if (out_dev)
1419 dev_put(out_dev);
1420 out:
1421 return err;
1423 e_inval:
1424 err = -EINVAL;
1425 goto done;
1427 e_nobufs:
1428 err = -ENOBUFS;
1429 goto done;
1431 e_neighbour:
1432 dst_free(&rt->u.dst);
1433 goto done;
1436 int dn_route_input(struct sk_buff *skb)
1438 struct dn_route *rt;
1439 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1440 unsigned hash = dn_hash(cb->src, cb->dst);
1442 if (skb->dst)
1443 return 0;
1445 rcu_read_lock();
1446 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1447 rt = rcu_dereference(rt->u.rt_next)) {
1448 if ((rt->fl.fld_src == cb->src) &&
1449 (rt->fl.fld_dst == cb->dst) &&
1450 (rt->fl.oif == 0) &&
1451 #ifdef CONFIG_DECNET_ROUTE_FWMARK
1452 (rt->fl.fld_fwmark == skb->nfmark) &&
1453 #endif
1454 (rt->fl.iif == cb->iif)) {
1455 rt->u.dst.lastuse = jiffies;
1456 dst_hold(&rt->u.dst);
1457 rt->u.dst.__use++;
1458 rcu_read_unlock();
1459 skb->dst = (struct dst_entry *)rt;
1460 return 0;
1463 rcu_read_unlock();
1465 return dn_route_input_slow(skb);
1468 static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1469 int event, int nowait, unsigned int flags)
1471 struct dn_route *rt = (struct dn_route *)skb->dst;
1472 struct rtmsg *r;
1473 struct nlmsghdr *nlh;
1474 unsigned char *b = skb->tail;
1475 struct rta_cacheinfo ci;
1477 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
1478 r = NLMSG_DATA(nlh);
1479 r->rtm_family = AF_DECnet;
1480 r->rtm_dst_len = 16;
1481 r->rtm_src_len = 0;
1482 r->rtm_tos = 0;
1483 r->rtm_table = RT_TABLE_MAIN;
1484 r->rtm_type = rt->rt_type;
1485 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
1486 r->rtm_scope = RT_SCOPE_UNIVERSE;
1487 r->rtm_protocol = RTPROT_UNSPEC;
1488 if (rt->rt_flags & RTCF_NOTIFY)
1489 r->rtm_flags |= RTM_F_NOTIFY;
1490 RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
1491 if (rt->fl.fld_src) {
1492 r->rtm_src_len = 16;
1493 RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
1495 if (rt->u.dst.dev)
1496 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
1498 * Note to self - change this if input routes reverse direction when
1499 * they deal only with inputs and not with replies like they do
1500 * currently.
1502 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
1503 if (rt->rt_daddr != rt->rt_gateway)
1504 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
1505 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
1506 goto rtattr_failure;
1507 ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
1508 ci.rta_used = rt->u.dst.__use;
1509 ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
1510 if (rt->u.dst.expires)
1511 ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
1512 else
1513 ci.rta_expires = 0;
1514 ci.rta_error = rt->u.dst.error;
1515 ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
1516 RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
1517 if (rt->fl.iif)
1518 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
1520 nlh->nlmsg_len = skb->tail - b;
1521 return skb->len;
1523 nlmsg_failure:
1524 rtattr_failure:
1525 skb_trim(skb, b - skb->data);
1526 return -1;
1530 * This is called by both endnodes and routers now.
1532 int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
1534 struct rtattr **rta = arg;
1535 struct rtmsg *rtm = NLMSG_DATA(nlh);
1536 struct dn_route *rt = NULL;
1537 struct dn_skb_cb *cb;
1538 int err;
1539 struct sk_buff *skb;
1540 struct flowi fl;
1542 memset(&fl, 0, sizeof(fl));
1543 fl.proto = DNPROTO_NSP;
1545 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1546 if (skb == NULL)
1547 return -ENOBUFS;
1548 skb->mac.raw = skb->data;
1549 cb = DN_SKB_CB(skb);
1551 if (rta[RTA_SRC-1])
1552 memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
1553 if (rta[RTA_DST-1])
1554 memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
1555 if (rta[RTA_IIF-1])
1556 memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
1558 if (fl.iif) {
1559 struct net_device *dev;
1560 if ((dev = dev_get_by_index(fl.iif)) == NULL) {
1561 kfree_skb(skb);
1562 return -ENODEV;
1564 if (!dev->dn_ptr) {
1565 dev_put(dev);
1566 kfree_skb(skb);
1567 return -ENODEV;
1569 skb->protocol = __constant_htons(ETH_P_DNA_RT);
1570 skb->dev = dev;
1571 cb->src = fl.fld_src;
1572 cb->dst = fl.fld_dst;
1573 local_bh_disable();
1574 err = dn_route_input(skb);
1575 local_bh_enable();
1576 memset(cb, 0, sizeof(struct dn_skb_cb));
1577 rt = (struct dn_route *)skb->dst;
1578 if (!err && -rt->u.dst.error)
1579 err = rt->u.dst.error;
1580 } else {
1581 int oif = 0;
1582 if (rta[RTA_OIF - 1])
1583 memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
1584 fl.oif = oif;
1585 err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
1588 if (skb->dev)
1589 dev_put(skb->dev);
1590 skb->dev = NULL;
1591 if (err)
1592 goto out_free;
1593 skb->dst = &rt->u.dst;
1594 if (rtm->rtm_flags & RTM_F_NOTIFY)
1595 rt->rt_flags |= RTCF_NOTIFY;
1597 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
1599 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
1601 if (err == 0)
1602 goto out_free;
1603 if (err < 0) {
1604 err = -EMSGSIZE;
1605 goto out_free;
1608 err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1610 return err;
1612 out_free:
1613 kfree_skb(skb);
1614 return err;
1618 * For routers, this is called from dn_fib_dump, but for endnodes its
1619 * called directly from the rtnetlink dispatch table.
1621 int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1623 struct dn_route *rt;
1624 int h, s_h;
1625 int idx, s_idx;
1627 if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
1628 return -EINVAL;
1629 if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
1630 return 0;
1632 s_h = cb->args[0];
1633 s_idx = idx = cb->args[1];
1634 for(h = 0; h <= dn_rt_hash_mask; h++) {
1635 if (h < s_h)
1636 continue;
1637 if (h > s_h)
1638 s_idx = 0;
1639 rcu_read_lock_bh();
1640 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
1642 rt = rcu_dereference(rt->u.rt_next), idx++) {
1643 if (idx < s_idx)
1644 continue;
1645 skb->dst = dst_clone(&rt->u.dst);
1646 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1647 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1648 1, NLM_F_MULTI) <= 0) {
1649 dst_release(xchg(&skb->dst, NULL));
1650 rcu_read_unlock_bh();
1651 goto done;
1653 dst_release(xchg(&skb->dst, NULL));
1655 rcu_read_unlock_bh();
1658 done:
1659 cb->args[0] = h;
1660 cb->args[1] = idx;
1661 return skb->len;
1664 #ifdef CONFIG_PROC_FS
1665 struct dn_rt_cache_iter_state {
1666 int bucket;
1669 static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1671 struct dn_route *rt = NULL;
1672 struct dn_rt_cache_iter_state *s = seq->private;
1674 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1675 rcu_read_lock_bh();
1676 rt = dn_rt_hash_table[s->bucket].chain;
1677 if (rt)
1678 break;
1679 rcu_read_unlock_bh();
1681 return rt;
1684 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
1686 struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
1688 rt = rt->u.rt_next;
1689 while(!rt) {
1690 rcu_read_unlock_bh();
1691 if (--s->bucket < 0)
1692 break;
1693 rcu_read_lock_bh();
1694 rt = dn_rt_hash_table[s->bucket].chain;
1696 return rt;
1699 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
1701 struct dn_route *rt = dn_rt_cache_get_first(seq);
1703 if (rt) {
1704 while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
1705 --*pos;
1707 return *pos ? NULL : rt;
1710 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1712 struct dn_route *rt = dn_rt_cache_get_next(seq, v);
1713 ++*pos;
1714 return rt;
1717 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
1719 if (v)
1720 rcu_read_unlock_bh();
1723 static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1725 struct dn_route *rt = v;
1726 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1728 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1729 rt->u.dst.dev ? rt->u.dst.dev->name : "*",
1730 dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
1731 dn_addr2asc(dn_ntohs(rt->rt_saddr), buf2),
1732 atomic_read(&rt->u.dst.__refcnt),
1733 rt->u.dst.__use,
1734 (int) dst_metric(&rt->u.dst, RTAX_RTT));
1735 return 0;
1738 static struct seq_operations dn_rt_cache_seq_ops = {
1739 .start = dn_rt_cache_seq_start,
1740 .next = dn_rt_cache_seq_next,
1741 .stop = dn_rt_cache_seq_stop,
1742 .show = dn_rt_cache_seq_show,
1745 static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
1747 struct seq_file *seq;
1748 int rc = -ENOMEM;
1749 struct dn_rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1751 if (!s)
1752 goto out;
1753 rc = seq_open(file, &dn_rt_cache_seq_ops);
1754 if (rc)
1755 goto out_kfree;
1756 seq = file->private_data;
1757 seq->private = s;
1758 memset(s, 0, sizeof(*s));
1759 out:
1760 return rc;
1761 out_kfree:
1762 kfree(s);
1763 goto out;
1766 static struct file_operations dn_rt_cache_seq_fops = {
1767 .owner = THIS_MODULE,
1768 .open = dn_rt_cache_seq_open,
1769 .read = seq_read,
1770 .llseek = seq_lseek,
1771 .release = seq_release_private,
1774 #endif /* CONFIG_PROC_FS */
void __init dn_route_init(void)
{
        int i, goal, order;

        dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
                                                   sizeof(struct dn_route),
                                                   0, SLAB_HWCACHE_ALIGN,
                                                   NULL, NULL);

        if (!dn_dst_ops.kmem_cachep)
                panic("DECnet: Failed to allocate dn_dst_cache\n");

        init_timer(&dn_route_timer);
        dn_route_timer.function = dn_dst_check_expire;
        dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
        add_timer(&dn_route_timer);

        goal = num_physpages >> (26 - PAGE_SHIFT);

        for(order = 0; (1UL << order) < goal; order++)
                /* NOTHING */;
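        /*
         * i.e. aim for roughly one page of hash table per 64MB of
         * physical memory (num_physpages >> (26 - PAGE_SHIFT)), rounded
         * up to a whole power-of-two number of pages by the loop above.
         */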
        /*
         * Only want 1024 entries max, since the table is very, very unlikely
         * to be larger than that.
         */
        while(order && ((((1UL << order) * PAGE_SIZE) /
                        sizeof(struct dn_rt_hash_bucket)) >= 2048))
                order--;

        do {
                dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
                        sizeof(struct dn_rt_hash_bucket);
                while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
                        dn_rt_hash_mask--;
                dn_rt_hash_table = (struct dn_rt_hash_bucket *)
                        __get_free_pages(GFP_ATOMIC, order);
        } while (dn_rt_hash_table == NULL && --order > 0);

        if (!dn_rt_hash_table)
                panic("Failed to allocate DECnet route cache hash table\n");

        printk(KERN_INFO
                "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
                dn_rt_hash_mask,
                (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

        dn_rt_hash_mask--;
        for(i = 0; i <= dn_rt_hash_mask; i++) {
                spin_lock_init(&dn_rt_hash_table[i].lock);
                dn_rt_hash_table[i].chain = NULL;
        }

        dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

        proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
}

void __exit dn_route_cleanup(void)
{
        del_timer(&dn_route_timer);
        dn_run_flush(0);

        proc_net_remove("decnet_cache");
}