net/decnet/dn_nsp_out.c

/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Network Services Protocol (Output)
 *
 * Author:      Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *
 *    Steve Whitehouse:  Split into dn_nsp_in.c and dn_nsp_out.c from
 *                       original dn_nsp.c.
 *    Steve Whitehouse:  Updated to work with my new routing architecture.
 *    Steve Whitehouse:  Added changes from Eduardo Serrat's patches.
 *    Steve Whitehouse:  Now conninits have the "return" bit set.
 *    Steve Whitehouse:  Fixes to check alloc'd skbs are non NULL!
 *                       Moved output state machine into one function
 *    Steve Whitehouse:  New output state machine
 *         Paul Koning:  Connect Confirm message fix.
 *      Eduardo Serrat:  Fix to stop dn_nsp_do_disc() sending malformed packets.
 *    Steve Whitehouse:  dn_nsp_output() and friends needed a spring clean
 *    Steve Whitehouse:  Moved dn_nsp_send() in here from route.h
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat		emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/termios.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/if_packet.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
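
/*
 * Retransmit backoff table, indexed by scp->nsp_rxtshift: the persist
 * timeout calculated below is multiplied by this factor, doubling on
 * each successive retransmission until it is capped at 64.
 */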
static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
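
/*
 * Transmit an NSP skb on its socket.  Uses the socket's cached route if
 * one is still valid, otherwise performs a fresh route lookup; if no
 * route can be found the error is reported as EHOSTUNREACH.
 */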
static void dn_nsp_send(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct dst_entry *dst;
	struct flowidn fld;

	skb_reset_transport_header(skb);
	scp->stamp = jiffies;

	dst = sk_dst_check(sk, 0);
	if (dst) {
try_again:
		skb_dst_set(skb, dst);
		dst_output(&init_net, skb->sk, skb);
		return;
	}

	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.saddr = dn_saddr2dn(&scp->addr);
	fld.daddr = dn_saddr2dn(&scp->peer);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {
		dst = sk_dst_get(sk);
		sk->sk_route_caps = dst->dev->features;
		goto try_again;
	}

	sk->sk_err = EHOSTUNREACH;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
}

/*
 * If sk == NULL, then we assume that we are supposed to be making
 * a routing layer skb. If sk != NULL, then we are supposed to be
 * creating an skb for the NSP layer.
 *
 * The eventual aim is for each socket to have a cached header size
 * for its outgoing packets, and to set hdr from this when sk != NULL.
 */
struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
{
	struct sk_buff *skb;
	int hdr = 64;

	if ((skb = alloc_skb(size + hdr, pri)) == NULL)
		return NULL;

	skb->protocol = htons(ETH_P_DNA_RT);
	skb->pkt_type = PACKET_OUTGOING;

	if (sk)
		skb_set_owner_w(skb, sk);

	skb_reserve(skb, hdr);

	return skb;
}

/*
 * Calculate persist timer based upon the smoothed round
 * trip time and the variance. Backoff according to the
 * nsp_backoff[] array.
 */
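/*
 * Note: dn_nsp_rtt() below keeps nsp_srtt scaled by 8 and nsp_rttvar
 * scaled by 4, so ((nsp_srtt >> 2) + nsp_rttvar) >> 1 works out to
 * roughly srtt + 2 * mean_deviation in jiffies, which is then scaled
 * by the backoff factor and clamped to the range [1s, 600s].
 */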
unsigned long dn_nsp_persist(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;

	t *= nsp_backoff[scp->nsp_rxtshift];

	if (t < HZ) t = HZ;
	if (t > (600*HZ)) t = (600*HZ);

	if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT)
		scp->nsp_rxtshift++;

	/* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */

	return t;
}

/*
 * This is called each time we get an estimate for the rtt
 * on the link.
 */
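/*
 * The smoothing follows the familiar TCP/Van Jacobson scheme: srtt is
 * an EWMA with gain 1/8 kept scaled by 8, and rttvar is an EWMA of the
 * absolute error with gain 1/4 kept scaled by 4.
 */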
static void dn_nsp_rtt(struct sock *sk, long rtt)
{
	struct dn_scp *scp = DN_SK(sk);
	long srtt = (long)scp->nsp_srtt;
	long rttvar = (long)scp->nsp_rttvar;
	long delta;

	/*
	 * If the jiffies clock flips over in the middle of timestamp
	 * gathering this value might turn out negative, so we make sure
	 * that it is always positive here.
	 */
	if (rtt < 0)
		rtt = -rtt;
	/*
	 * Add new rtt to smoothed average
	 */
	delta = ((rtt << 3) - srtt);
	srtt += (delta >> 3);
	if (srtt >= 1)
		scp->nsp_srtt = (unsigned long)srtt;
	else
		scp->nsp_srtt = 1;

	/*
	 * Add new rtt variance to smoothed variance
	 */
	delta >>= 1;
	rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
	if (rttvar >= 1)
		scp->nsp_rttvar = (unsigned long)rttvar;
	else
		scp->nsp_rttvar = 1;

	/* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */
}

/**
 * dn_nsp_clone_and_send - Send a data packet by cloning it
 * @skb: The packet to clone and transmit
 * @gfp: memory allocation flag
 *
 * Clone a queued data or other data packet and transmit it.
 *
 * Returns: The number of times the packet has been sent previously
 */
static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
						 gfp_t gfp)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sk_buff *skb2;
	int ret = 0;

	if ((skb2 = skb_clone(skb, gfp)) != NULL) {
		ret = cb->xmit_count;
		cb->xmit_count++;
		cb->stamp = jiffies;
		skb2->sk = skb->sk;
		dn_nsp_send(skb2);
	}

	return ret;
}

/**
 * dn_nsp_output - Try and send something from socket queues
 * @sk: The socket whose queues are to be investigated
 *
 * Try and send the packet on the end of the data and other data queues.
 * Other data gets priority over data, and if we retransmit a packet we
 * reduce the window by dividing it in two.
 */
void dn_nsp_output(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb;
	unsigned int reduce_win = 0;

	/*
	 * First we check for otherdata/linkservice messages
	 */
	if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);

	/*
	 * If we may not send any data, we don't.
	 * If we are still trying to get some other data down the
	 * channel, we don't try and send any data.
	 */
	if (reduce_win || (scp->flowrem_sw != DN_SEND))
		goto recalc_window;

	if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);

	/*
	 * If we've sent any frame more than once, we cut the
	 * send window size in half. There is always a minimum
	 * window size of one available.
	 */
recalc_window:
	if (reduce_win) {
		scp->snd_window >>= 1;
		if (scp->snd_window < NSP_MIN_WINDOW)
			scp->snd_window = NSP_MIN_WINDOW;
	}
}

int dn_nsp_xmit_timeout(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	dn_nsp_output(sk);

	if (!skb_queue_empty(&scp->data_xmit_queue) ||
	    !skb_queue_empty(&scp->other_xmit_queue))
		scp->persist = dn_nsp_persist(sk);

	return 0;
}
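
/*
 * Build the part common to all NSP headers: the message flags byte
 * followed by the remote and local logical link addresses, each a
 * little-endian 16 bit value.  Returns a pointer just past the fields
 * it wrote, ready for the ack numbers to be appended.
 */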
static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
{
	unsigned char *ptr = skb_push(skb, len);

	BUG_ON(len < 5);

	*ptr++ = msgflag;
	*((__le16 *)ptr) = scp->addrrem;
	ptr += 2;
	*((__le16 *)ptr) = scp->addrloc;
	ptr += 2;
	return (__le16 __force *)ptr;
}
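
/*
 * Each of the two ack fields carries a 12 bit sequence number in its
 * low bits; 0x8000 marks the field as a valid acknowledgement and
 * 0x2000 marks the second field as referring to the cross subchannel.
 * For a data message the first field acks the data subchannel and the
 * second the other-data subchannel; for an other-data message the two
 * are swapped.
 */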
static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
{
	struct dn_scp *scp = DN_SK(sk);
	unsigned short acknum = scp->numdat_rcv & 0x0FFF;
	unsigned short ackcrs = scp->numoth_rcv & 0x0FFF;
	__le16 *ptr;

	BUG_ON(hlen < 9);

	scp->ackxmt_dat = acknum;
	scp->ackxmt_oth = ackcrs;
	acknum |= 0x8000;
	ackcrs |= 0x8000;

	/* If this is an "other data/ack" message, swap acknum and ackcrs */
	if (other)
		swap(acknum, ackcrs);

	/* Set "cross subchannel" bit in ackcrs */
	ackcrs |= 0x2000;

	ptr = dn_mk_common_header(scp, skb, msgflag, hlen);

	*ptr++ = cpu_to_le16(acknum);
	*ptr++ = cpu_to_le16(ackcrs);

	return ptr;
}

static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	__le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);

	if (unlikely(oth)) {
		cb->segnum = scp->numoth;
		seq_add(&scp->numoth, 1);
	} else {
		cb->segnum = scp->numdat;
		seq_add(&scp->numdat, 1);
	}
	*(ptr++) = cpu_to_le16(cb->segnum);

	return ptr;
}
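
/*
 * Give the packet a sequence number, add it to the tail of the data or
 * other-data transmit queue (where it stays until acknowledged), and,
 * if flow control currently allows us to send, transmit a clone of it.
 */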
void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
			gfp_t gfp, int oth)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;

	cb->xmit_count = 0;
	dn_nsp_mk_data_header(sk, skb, oth);

	/*
	 * Slow start: If we have been idle for more than
	 * one RTT, then reset window to min size.
	 */
	if ((jiffies - scp->stamp) > t)
		scp->snd_window = NSP_MIN_WINDOW;

	if (oth)
		skb_queue_tail(&scp->other_xmit_queue, skb);
	else
		skb_queue_tail(&scp->data_xmit_queue, skb);

	if (scp->flowrem_sw != DN_SEND)
		return;

	dn_nsp_clone_and_send(skb, gfp);
}
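
/*
 * Process an acknowledgement for sequence number @acknum on one of the
 * two subchannels: free every queued packet up to and including it,
 * use packets that were only ever sent once to update the RTT estimate
 * and open the send window, and retransmit (go-back-N style) if the
 * last packet acked had already been sent more than once.  Returns
 * non-zero if a waiting sender may need to be woken.
 */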
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb2, *n, *ack = NULL;
	int wakeup = 0;
	int try_retrans = 0;
	unsigned long reftime = cb->stamp;
	unsigned long pkttime;
	unsigned short xmit_count;
	unsigned short segnum;

	skb_queue_walk_safe(q, skb2, n) {
		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);

		if (dn_before_or_equal(cb2->segnum, acknum))
			ack = skb2;

		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */

		if (ack == NULL)
			continue;

		/* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */

		/* Does _last_ packet acked have xmit_count > 1 */
		try_retrans = 0;
		/* Remember to wake up the sending process */
		wakeup = 1;
		/* Keep various statistics */
		pkttime = cb2->stamp;
		xmit_count = cb2->xmit_count;
		segnum = cb2->segnum;
		/* Remove and drop ack'ed packet */
		skb_unlink(ack, q);
		kfree_skb(ack);
		ack = NULL;

		/*
		 * We don't expect to see acknowledgements for packets we
		 * haven't sent yet.
		 */
		WARN_ON(xmit_count == 0);

		/*
		 * If the packet has only been sent once, we can use it
		 * to calculate the RTT and also open the window a little
		 * further.
		 */
		if (xmit_count == 1) {
			if (dn_equal(segnum, acknum))
				dn_nsp_rtt(sk, (long)(pkttime - reftime));

			if (scp->snd_window < scp->max_window)
				scp->snd_window++;
		}

		/*
		 * Packet has been sent more than once. If this is the last
		 * packet to be acknowledged then we want to send the next
		 * packet in the send queue again (assumes the remote host does
		 * go-back-N error control).
		 */
		if (xmit_count > 1)
			try_retrans = 1;
	}

	if (try_retrans)
		dn_nsp_output(sk);

	return wakeup;
}
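
/*
 * Send explicit acknowledgements: 0x04 is the message flags value for a
 * data acknowledgement and 0x14 the value for an other-data
 * acknowledgement.  Both are 9 byte messages built by dn_mk_ack_header().
 */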
void dn_nsp_send_data_ack(struct sock *sk)
{
	struct sk_buff *skb = NULL;

	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
		return;

	skb_reserve(skb, 9);
	dn_mk_ack_header(sk, skb, 0x04, 9, 0);
	dn_nsp_send(skb);
}

void dn_nsp_send_oth_ack(struct sock *sk)
{
	struct sk_buff *skb = NULL;

	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
		return;

	skb_reserve(skb, 9);
	dn_mk_ack_header(sk, skb, 0x14, 9, 1);
	dn_nsp_send(skb);
}

void dn_send_conn_ack(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb = NULL;
	struct nsp_conn_ack_msg *msg;

	if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
		return;

	msg = skb_put(skb, 3);
	msg->msgflg = 0x24;
	msg->dstaddr = scp->addrrem;

	dn_nsp_send(skb);
}

static int dn_nsp_retrans_conn_conf(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CC)
		dn_send_conn_conf(sk, GFP_ATOMIC);

	return 0;
}
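
/*
 * Send a Connect Confirm carrying our flow control services, version
 * info and maximum segment size, followed by the optional connect data
 * (a length byte followed by the data).  The message is retransmitted
 * from the persist timer while the socket remains in the DN_CC state.
 */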
void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb = NULL;
	struct nsp_conn_init_msg *msg;
	__u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);

	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
		return;

	msg = skb_put(skb, sizeof(*msg));
	msg->msgflg = 0x28;
	msg->dstaddr = scp->addrrem;
	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;
	msg->info = scp->info_loc;
	msg->segsize = cpu_to_le16(scp->segsize_loc);

	skb_put_u8(skb, len);

	if (len > 0)
		skb_put_data(skb, scp->conndata_out.opt_data, len);

	dn_nsp_send(skb);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conn_conf;
}
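
/*
 * Build and send a disconnect message: message flags, remote and local
 * link addresses, a two byte reason code and, for a Disconnect
 * Initiate, a length byte plus optional disconnect data.  This is sent
 * directly via dst_output() rather than dn_nsp_send() so that
 * disconnects can be generated for packets that have no associated
 * socket.
 */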
static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
			unsigned short reason, gfp_t gfp,
			struct dst_entry *dst,
			int ddl, unsigned char *dd, __le16 rem, __le16 loc)
{
	struct sk_buff *skb = NULL;
	int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0);
	unsigned char *msg;

	if ((dst == NULL) || (rem == 0)) {
		net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
				    le16_to_cpu(rem), dst);
		return;
	}

	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
		return;

	msg = skb_put(skb, size);
	*msg++ = msgflg;
	*(__le16 *)msg = rem;
	msg += 2;
	*(__le16 *)msg = loc;
	msg += 2;
	*(__le16 *)msg = cpu_to_le16(reason);
	msg += 2;
	if (msgflg == NSP_DISCINIT)
		*msg++ = ddl;

	if (ddl) {
		memcpy(msg, dd, ddl);
	}

	/*
	 * This doesn't go via the dn_nsp_send() function since we need
	 * to be able to send disc packets out which have no socket
	 * associations.
	 */
	skb_dst_set(skb, dst_clone(dst));
	dst_output(&init_net, skb->sk, skb);
}

void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
			unsigned short reason, gfp_t gfp)
{
	struct dn_scp *scp = DN_SK(sk);
	int ddl = 0;

	if (msgflg == NSP_DISCINIT)
		ddl = le16_to_cpu(scp->discdata_out.opt_optl);

	if (reason == 0)
		reason = le16_to_cpu(scp->discdata_out.opt_status);

	dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl,
		scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
}

void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
			unsigned short reason)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int ddl = 0;
	gfp_t gfp = GFP_ATOMIC;

	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
			NULL, cb->src_port, cb->dst_port);
}
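
/*
 * Send a Link Service message carrying the link service flags and a
 * flow control value.  It is queued as "other data" and so is
 * retransmitted until acknowledged.
 */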
void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb;
	unsigned char *ptr;
	gfp_t gfp = GFP_ATOMIC;

	if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
		return;

	skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
	ptr = skb_put(skb, 2);
	DN_SKB_CB(skb)->nsp_flags = 0x10;
	*ptr++ = lsflags;
	*ptr = fcval;

	dn_nsp_queue_xmit(sk, skb, gfp, 1);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_xmit_timeout;
}

static int dn_nsp_retrans_conninit(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CI)
		dn_nsp_send_conninit(sk, NSP_RCI);

	return 0;
}
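
/*
 * Build and send a Connect Initiate (or Retransmitted Connect Initiate)
 * message: the fixed nsp_conn_init_msg header, the destination and
 * source end user descriptors, a menu version byte, the access control
 * strings (user, password and account, each sent as a length byte plus
 * data) and finally any optional connect data.  The routing "return to
 * sender" flag is set on the packet so that an undeliverable conninit
 * comes back to us, and the message is retransmitted from the persist
 * timer while the socket stays in the DN_CI state.
 */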
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
	struct dn_scp *scp = DN_SK(sk);
	struct nsp_conn_init_msg *msg;
	unsigned char aux;
	unsigned char menuver;
	struct dn_skb_cb *cb;
	unsigned char type = 1;
	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);

	if (!skb)
		return;

	cb = DN_SKB_CB(skb);
	msg = skb_put(skb, sizeof(*msg));

	msg->msgflg = msgflg;
	msg->dstaddr = 0x0000;			/* Remote Node will assign it */

	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;	/* Requested flow control    */
	msg->info = scp->info_loc;		/* Version Number            */
	msg->segsize = cpu_to_le16(scp->segsize_loc);	/* Max segment size  */

	if (scp->peer.sdn_objnum)
		type = 0;

	skb_put(skb, dn_sockaddr2username(&scp->peer,
					  skb_tail_pointer(skb), type));
	skb_put(skb, dn_sockaddr2username(&scp->addr,
					  skb_tail_pointer(skb), 2));

	menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
	if (scp->peer.sdn_flags & SDF_PROXY)
		menuver |= DN_MENUVER_PRX;
	if (scp->peer.sdn_flags & SDF_UICPROXY)
		menuver |= DN_MENUVER_UIC;

	skb_put_u8(skb, menuver);	/* Menu Version */

	aux = scp->accessdata.acc_userl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_user, aux);

	aux = scp->accessdata.acc_passl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_pass, aux);

	aux = scp->accessdata.acc_accl;
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->accessdata.acc_acc, aux);

	aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
	skb_put_u8(skb, aux);
	if (aux > 0)
		skb_put_data(skb, scp->conndata_out.opt_data, aux);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conninit;

	cb->rt_flags = DN_RT_F_RQR;

	dn_nsp_send(skb);
}