3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * DECnet Socket Layer Interface
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
26 * way. Didn't manage it entirely, but its better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
73 Port to new kernel development version.
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for incoming connections
79 so we can start developing server apps
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
100 *******************************************************************************/
102 #include <linux/config.h>
103 #include <linux/module.h>
104 #include <linux/errno.h>
105 #include <linux/types.h>
106 #include <linux/slab.h>
107 #include <linux/socket.h>
108 #include <linux/in.h>
109 #include <linux/kernel.h>
110 #include <linux/sched.h>
111 #include <linux/timer.h>
112 #include <linux/string.h>
113 #include <linux/sockios.h>
114 #include <linux/net.h>
115 #include <linux/netdevice.h>
116 #include <linux/inet.h>
117 #include <linux/route.h>
118 #include <linux/netfilter.h>
119 #include <linux/seq_file.h>
120 #include <net/sock.h>
121 #include <net/tcp_states.h>
122 #include <net/flow.h>
123 #include <asm/system.h>
124 #include <asm/ioctls.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/neighbour.h>
134 #include <net/dn_nsp.h>
135 #include <net/dn_dev.h>
136 #include <net/dn_route.h>
137 #include <net/dn_fib.h>
138 #include <net/dn_neigh.h>
145 static void dn_keepalive(struct sock
*sk
);
147 #define DN_SK_HASH_SHIFT 8
148 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
149 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
152 static struct proto_ops dn_proto_ops
;
153 static DEFINE_RWLOCK(dn_hash_lock
);
154 static struct hlist_head dn_sk_hash
[DN_SK_HASH_SIZE
];
155 static struct hlist_head dn_wild_sk
;
157 static int __dn_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int optlen
, int flags
);
158 static int __dn_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
, int flags
);
160 static struct hlist_head
*dn_find_list(struct sock
*sk
)
162 struct dn_scp
*scp
= DN_SK(sk
);
164 if (scp
->addr
.sdn_flags
& SDF_WILD
)
165 return hlist_empty(&dn_wild_sk
) ? &dn_wild_sk
: NULL
;
167 return &dn_sk_hash
[scp
->addrloc
& DN_SK_HASH_MASK
];
171 * Valid ports are those greater than zero and not already in use.
173 static int check_port(unsigned short port
)
176 struct hlist_node
*node
;
181 sk_for_each(sk
, node
, &dn_sk_hash
[port
& DN_SK_HASH_MASK
]) {
182 struct dn_scp
*scp
= DN_SK(sk
);
183 if (scp
->addrloc
== port
)
189 static unsigned short port_alloc(struct sock
*sk
)
191 struct dn_scp
*scp
= DN_SK(sk
);
192 static unsigned short port
= 0x2000;
193 unsigned short i_port
= port
;
195 while(check_port(++port
) != 0) {
206 * Since this is only ever called from user
207 * level, we don't need a write_lock() version
210 static int dn_hash_sock(struct sock
*sk
)
212 struct dn_scp
*scp
= DN_SK(sk
);
213 struct hlist_head
*list
;
216 BUG_ON(sk_hashed(sk
));
218 write_lock_bh(&dn_hash_lock
);
220 if (!scp
->addrloc
&& !port_alloc(sk
))
224 if ((list
= dn_find_list(sk
)) == NULL
)
227 sk_add_node(sk
, list
);
230 write_unlock_bh(&dn_hash_lock
);
234 static void dn_unhash_sock(struct sock
*sk
)
236 write_lock(&dn_hash_lock
);
237 sk_del_node_init(sk
);
238 write_unlock(&dn_hash_lock
);
241 static void dn_unhash_sock_bh(struct sock
*sk
)
243 write_lock_bh(&dn_hash_lock
);
244 sk_del_node_init(sk
);
245 write_unlock_bh(&dn_hash_lock
);
248 static struct hlist_head
*listen_hash(struct sockaddr_dn
*addr
)
251 unsigned hash
= addr
->sdn_objnum
;
254 hash
= addr
->sdn_objnamel
;
255 for(i
= 0; i
< dn_ntohs(addr
->sdn_objnamel
); i
++) {
256 hash
^= addr
->sdn_objname
[i
];
261 return &dn_sk_hash
[hash
& DN_SK_HASH_MASK
];
265 * Called to transform a socket from bound (i.e. with a local address)
266 * into a listening socket (doesn't need a local port number) and rehashes
267 * based upon the object name/number.
269 static void dn_rehash_sock(struct sock
*sk
)
271 struct hlist_head
*list
;
272 struct dn_scp
*scp
= DN_SK(sk
);
274 if (scp
->addr
.sdn_flags
& SDF_WILD
)
277 write_lock_bh(&dn_hash_lock
);
278 sk_del_node_init(sk
);
279 DN_SK(sk
)->addrloc
= 0;
280 list
= listen_hash(&DN_SK(sk
)->addr
);
281 sk_add_node(sk
, list
);
282 write_unlock_bh(&dn_hash_lock
);
285 int dn_sockaddr2username(struct sockaddr_dn
*sdn
, unsigned char *buf
, unsigned char type
)
293 *buf
++ = sdn
->sdn_objnum
;
297 *buf
++ = dn_ntohs(sdn
->sdn_objnamel
);
298 memcpy(buf
, sdn
->sdn_objname
, dn_ntohs(sdn
->sdn_objnamel
));
299 len
= 3 + dn_ntohs(sdn
->sdn_objnamel
);
304 *buf
++ = dn_ntohs(sdn
->sdn_objnamel
);
305 memcpy(buf
, sdn
->sdn_objname
, dn_ntohs(sdn
->sdn_objnamel
));
306 len
= 7 + dn_ntohs(sdn
->sdn_objnamel
);
314 * On reception of usernames, we handle types 1 and 0 for destination
315 * addresses only. Types 2 and 4 are used for source addresses, but the
316 * UIC, GIC are ignored and they are both treated the same way. Type 3
317 * is never used as I've no idea what its purpose might be or what its
320 int dn_username2sockaddr(unsigned char *data
, int len
, struct sockaddr_dn
*sdn
, unsigned char *fmt
)
327 sdn
->sdn_objnamel
= dn_htons(0);
328 memset(sdn
->sdn_objname
, 0, DN_MAXOBJL
);
339 sdn
->sdn_objnum
= type
;
361 sdn
->sdn_objnamel
= dn_htons(*data
++);
362 len
-= dn_ntohs(sdn
->sdn_objnamel
);
364 if ((len
< 0) || (dn_ntohs(sdn
->sdn_objnamel
) > namel
))
367 memcpy(sdn
->sdn_objname
, data
, dn_ntohs(sdn
->sdn_objnamel
));
372 struct sock
*dn_sklist_find_listener(struct sockaddr_dn
*addr
)
374 struct hlist_head
*list
= listen_hash(addr
);
375 struct hlist_node
*node
;
378 read_lock(&dn_hash_lock
);
379 sk_for_each(sk
, node
, list
) {
380 struct dn_scp
*scp
= DN_SK(sk
);
381 if (sk
->sk_state
!= TCP_LISTEN
)
383 if (scp
->addr
.sdn_objnum
) {
384 if (scp
->addr
.sdn_objnum
!= addr
->sdn_objnum
)
387 if (addr
->sdn_objnum
)
389 if (scp
->addr
.sdn_objnamel
!= addr
->sdn_objnamel
)
391 if (memcmp(scp
->addr
.sdn_objname
, addr
->sdn_objname
, dn_ntohs(addr
->sdn_objnamel
)) != 0)
395 read_unlock(&dn_hash_lock
);
399 sk
= sk_head(&dn_wild_sk
);
401 if (sk
->sk_state
== TCP_LISTEN
)
407 read_unlock(&dn_hash_lock
);
411 struct sock
*dn_find_by_skb(struct sk_buff
*skb
)
413 struct dn_skb_cb
*cb
= DN_SKB_CB(skb
);
415 struct hlist_node
*node
;
418 read_lock(&dn_hash_lock
);
419 sk_for_each(sk
, node
, &dn_sk_hash
[cb
->dst_port
& DN_SK_HASH_MASK
]) {
421 if (cb
->src
!= dn_saddr2dn(&scp
->peer
))
423 if (cb
->dst_port
!= scp
->addrloc
)
425 if (scp
->addrrem
&& (cb
->src_port
!= scp
->addrrem
))
432 read_unlock(&dn_hash_lock
);
438 static void dn_destruct(struct sock
*sk
)
440 struct dn_scp
*scp
= DN_SK(sk
);
442 skb_queue_purge(&scp
->data_xmit_queue
);
443 skb_queue_purge(&scp
->other_xmit_queue
);
444 skb_queue_purge(&scp
->other_receive_queue
);
446 dst_release(xchg(&sk
->sk_dst_cache
, NULL
));
449 static struct proto dn_proto
= {
451 .owner
= THIS_MODULE
,
452 .obj_size
= sizeof(struct dn_sock
),
455 static struct sock
*dn_alloc_sock(struct socket
*sock
, gfp_t gfp
)
458 struct sock
*sk
= sk_alloc(PF_DECnet
, gfp
, &dn_proto
, 1);
464 sock
->ops
= &dn_proto_ops
;
465 sock_init_data(sock
, sk
);
467 sk
->sk_backlog_rcv
= dn_nsp_backlog_rcv
;
468 sk
->sk_destruct
= dn_destruct
;
470 sk
->sk_family
= PF_DECnet
;
472 sk
->sk_allocation
= gfp
;
474 /* Initialization of DECnet Session Control Port */
476 scp
->state
= DN_O
; /* Open */
477 scp
->numdat
= 1; /* Next data seg to tx */
478 scp
->numoth
= 1; /* Next oth data to tx */
479 scp
->ackxmt_dat
= 0; /* Last data seg ack'ed */
480 scp
->ackxmt_oth
= 0; /* Last oth data ack'ed */
481 scp
->ackrcv_dat
= 0; /* Highest data ack recv*/
482 scp
->ackrcv_oth
= 0; /* Last oth data ack rec*/
483 scp
->flowrem_sw
= DN_SEND
;
484 scp
->flowloc_sw
= DN_SEND
;
485 scp
->flowrem_dat
= 0;
486 scp
->flowrem_oth
= 1;
487 scp
->flowloc_dat
= 0;
488 scp
->flowloc_oth
= 1;
489 scp
->services_rem
= 0;
490 scp
->services_loc
= 1 | NSP_FC_NONE
;
492 scp
->info_loc
= 0x03; /* NSP version 4.1 */
493 scp
->segsize_rem
= 230 - DN_MAX_NSP_DATA_HEADER
; /* Default: Updated by remote segsize */
496 scp
->accept_mode
= ACC_IMMED
;
497 scp
->addr
.sdn_family
= AF_DECnet
;
498 scp
->peer
.sdn_family
= AF_DECnet
;
499 scp
->accessdata
.acc_accl
= 5;
500 memcpy(scp
->accessdata
.acc_acc
, "LINUX", 5);
502 scp
->max_window
= NSP_MAX_WINDOW
;
503 scp
->snd_window
= NSP_MIN_WINDOW
;
504 scp
->nsp_srtt
= NSP_INITIAL_SRTT
;
505 scp
->nsp_rttvar
= NSP_INITIAL_RTTVAR
;
506 scp
->nsp_rxtshift
= 0;
508 skb_queue_head_init(&scp
->data_xmit_queue
);
509 skb_queue_head_init(&scp
->other_xmit_queue
);
510 skb_queue_head_init(&scp
->other_receive_queue
);
513 scp
->persist_fxn
= NULL
;
514 scp
->keepalive
= 10 * HZ
;
515 scp
->keepalive_fxn
= dn_keepalive
;
517 init_timer(&scp
->delack_timer
);
518 scp
->delack_pending
= 0;
519 scp
->delack_fxn
= dn_nsp_delayed_ack
;
521 dn_start_slow_timer(sk
);
528 * FIXME: Should respond to SO_KEEPALIVE etc.
530 static void dn_keepalive(struct sock
*sk
)
532 struct dn_scp
*scp
= DN_SK(sk
);
535 * By checking the other_data transmit queue is empty
536 * we are double checking that we are not sending too
537 * many of these keepalive frames.
539 if (skb_queue_empty(&scp
->other_xmit_queue
))
540 dn_nsp_send_link(sk
, DN_NOCHANGE
, 0);
545 * Timer for shutdown/destroyed sockets.
546 * When socket is dead & no packets have been sent for a
547 * certain amount of time, they are removed by this
548 * routine. Also takes care of sending out DI & DC
549 * frames at correct times.
551 int dn_destroy_timer(struct sock
*sk
)
553 struct dn_scp
*scp
= DN_SK(sk
);
555 scp
->persist
= dn_nsp_persist(sk
);
559 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, GFP_ATOMIC
);
560 if (scp
->nsp_rxtshift
>= decnet_di_count
)
565 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, GFP_ATOMIC
);
566 if (scp
->nsp_rxtshift
>= decnet_dr_count
)
571 if (scp
->nsp_rxtshift
< decnet_dn_count
) {
572 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
573 dn_nsp_send_disc(sk
, NSP_DISCCONF
, NSP_REASON_DC
, GFP_ATOMIC
);
578 scp
->persist
= (HZ
* decnet_time_wait
);
583 if ((jiffies
- scp
->stamp
) >= (HZ
* decnet_time_wait
)) {
592 static void dn_destroy_sock(struct sock
*sk
)
594 struct dn_scp
*scp
= DN_SK(sk
);
596 scp
->nsp_rxtshift
= 0; /* reset back off */
599 if (sk
->sk_socket
->state
!= SS_UNCONNECTED
)
600 sk
->sk_socket
->state
= SS_DISCONNECTING
;
603 sk
->sk_state
= TCP_CLOSE
;
607 dn_nsp_send_disc(sk
, NSP_DISCCONF
, NSP_REASON_DC
,
609 scp
->persist_fxn
= dn_destroy_timer
;
610 scp
->persist
= dn_nsp_persist(sk
);
620 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, sk
->sk_allocation
);
629 scp
->persist_fxn
= dn_destroy_timer
;
630 scp
->persist
= dn_nsp_persist(sk
);
633 printk(KERN_DEBUG
"DECnet: dn_destroy_sock passed socket in invalid state\n");
635 dn_stop_slow_timer(sk
);
637 dn_unhash_sock_bh(sk
);
644 char *dn_addr2asc(dn_address addr
, char *buf
)
646 unsigned short node
, area
;
648 node
= addr
& 0x03ff;
650 sprintf(buf
, "%hd.%hd", area
, node
);
657 static int dn_create(struct socket
*sock
, int protocol
)
663 if (protocol
!= DNPROTO_NSP
)
664 return -EPROTONOSUPPORT
;
669 return -ESOCKTNOSUPPORT
;
673 if ((sk
= dn_alloc_sock(sock
, GFP_KERNEL
)) == NULL
)
676 sk
->sk_protocol
= protocol
;
683 dn_release(struct socket
*sock
)
685 struct sock
*sk
= sock
->sk
;
699 static int dn_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
701 struct sock
*sk
= sock
->sk
;
702 struct dn_scp
*scp
= DN_SK(sk
);
703 struct sockaddr_dn
*saddr
= (struct sockaddr_dn
*)uaddr
;
704 struct net_device
*dev
;
707 if (addr_len
!= sizeof(struct sockaddr_dn
))
710 if (saddr
->sdn_family
!= AF_DECnet
)
713 if (dn_ntohs(saddr
->sdn_nodeaddrl
) && (dn_ntohs(saddr
->sdn_nodeaddrl
) != 2))
716 if (dn_ntohs(saddr
->sdn_objnamel
) > DN_MAXOBJL
)
719 if (saddr
->sdn_flags
& ~SDF_WILD
)
722 if (!capable(CAP_NET_BIND_SERVICE
) && (saddr
->sdn_objnum
||
723 (saddr
->sdn_flags
& SDF_WILD
)))
726 if (!(saddr
->sdn_flags
& SDF_WILD
)) {
727 if (dn_ntohs(saddr
->sdn_nodeaddrl
)) {
728 read_lock(&dev_base_lock
);
729 for(dev
= dev_base
; dev
; dev
= dev
->next
) {
732 if (dn_dev_islocal(dev
, dn_saddr2dn(saddr
)))
735 read_unlock(&dev_base_lock
);
737 return -EADDRNOTAVAIL
;
743 if (sock_flag(sk
, SOCK_ZAPPED
)) {
744 memcpy(&scp
->addr
, saddr
, addr_len
);
745 sock_reset_flag(sk
, SOCK_ZAPPED
);
747 rv
= dn_hash_sock(sk
);
749 sock_set_flag(sk
, SOCK_ZAPPED
);
757 static int dn_auto_bind(struct socket
*sock
)
759 struct sock
*sk
= sock
->sk
;
760 struct dn_scp
*scp
= DN_SK(sk
);
763 sock_reset_flag(sk
, SOCK_ZAPPED
);
765 scp
->addr
.sdn_flags
= 0;
766 scp
->addr
.sdn_objnum
= 0;
769 * This stuff is to keep compatibility with Eduardo's
770 * patch. I hope I can dispense with it shortly...
772 if ((scp
->accessdata
.acc_accl
!= 0) &&
773 (scp
->accessdata
.acc_accl
<= 12)) {
775 scp
->addr
.sdn_objnamel
= dn_htons(scp
->accessdata
.acc_accl
);
776 memcpy(scp
->addr
.sdn_objname
, scp
->accessdata
.acc_acc
, dn_ntohs(scp
->addr
.sdn_objnamel
));
778 scp
->accessdata
.acc_accl
= 0;
779 memset(scp
->accessdata
.acc_acc
, 0, 40);
781 /* End of compatibility stuff */
783 scp
->addr
.sdn_add
.a_len
= dn_htons(2);
784 rv
= dn_dev_bind_default((dn_address
*)scp
->addr
.sdn_add
.a_addr
);
786 rv
= dn_hash_sock(sk
);
788 sock_set_flag(sk
, SOCK_ZAPPED
);
794 static int dn_confirm_accept(struct sock
*sk
, long *timeo
, gfp_t allocation
)
796 struct dn_scp
*scp
= DN_SK(sk
);
800 if (scp
->state
!= DN_CR
)
804 scp
->segsize_loc
= dst_metric(__sk_dst_get(sk
), RTAX_ADVMSS
);
805 dn_send_conn_conf(sk
, allocation
);
807 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
810 if (scp
->state
== DN_CC
)
811 *timeo
= schedule_timeout(*timeo
);
814 if (scp
->state
== DN_RUN
)
816 err
= sock_error(sk
);
819 err
= sock_intr_errno(*timeo
);
820 if (signal_pending(current
))
825 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
827 finish_wait(sk
->sk_sleep
, &wait
);
829 sk
->sk_socket
->state
= SS_CONNECTED
;
830 } else if (scp
->state
!= DN_CC
) {
831 sk
->sk_socket
->state
= SS_UNCONNECTED
;
836 static int dn_wait_run(struct sock
*sk
, long *timeo
)
838 struct dn_scp
*scp
= DN_SK(sk
);
842 if (scp
->state
== DN_RUN
)
848 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
851 if (scp
->state
== DN_CI
|| scp
->state
== DN_CC
)
852 *timeo
= schedule_timeout(*timeo
);
855 if (scp
->state
== DN_RUN
)
857 err
= sock_error(sk
);
860 err
= sock_intr_errno(*timeo
);
861 if (signal_pending(current
))
866 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
868 finish_wait(sk
->sk_sleep
, &wait
);
871 sk
->sk_socket
->state
= SS_CONNECTED
;
872 } else if (scp
->state
!= DN_CI
&& scp
->state
!= DN_CC
) {
873 sk
->sk_socket
->state
= SS_UNCONNECTED
;
/*
 * Core of connect(2): validate state and address, auto-bind if
 * needed, find a route, send the NSP connect-init and (when a
 * timeout is given) wait for the connection to complete.
 * Called with the socket locked.
 * NOTE(review): error-code assignments between fragments were
 * reconstructed from kernel history — verify against the original.
 */
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowi fl;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;

	if (addr->sdn_family != AF_DECnet)
		goto out;

	if (addr->sdn_flags & SDF_WILD)
		goto out;

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fl, 0, sizeof(fl));
	fl.oif = sk->sk_bound_dev_if;
	fl.fld_dst = dn_saddr2dn(&scp->peer);
	fl.fld_src = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fl, scp);
	fl.proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
		goto out;
	sk->sk_route_caps = sk->sk_dst_cache->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}
944 static int dn_connect(struct socket
*sock
, struct sockaddr
*uaddr
, int addrlen
, int flags
)
946 struct sockaddr_dn
*addr
= (struct sockaddr_dn
*)uaddr
;
947 struct sock
*sk
= sock
->sk
;
949 long timeo
= sock_sndtimeo(sk
, flags
& O_NONBLOCK
);
952 err
= __dn_connect(sk
, addr
, addrlen
, &timeo
, 0);
958 static inline int dn_check_state(struct sock
*sk
, struct sockaddr_dn
*addr
, int addrlen
, long *timeo
, int flags
)
960 struct dn_scp
*scp
= DN_SK(sk
);
966 return dn_confirm_accept(sk
, timeo
, sk
->sk_allocation
);
969 return dn_wait_run(sk
, timeo
);
971 return __dn_connect(sk
, addr
, addrlen
, timeo
, flags
);
978 static void dn_access_copy(struct sk_buff
*skb
, struct accessdata_dn
*acc
)
980 unsigned char *ptr
= skb
->data
;
982 acc
->acc_userl
= *ptr
++;
983 memcpy(&acc
->acc_user
, ptr
, acc
->acc_userl
);
984 ptr
+= acc
->acc_userl
;
986 acc
->acc_passl
= *ptr
++;
987 memcpy(&acc
->acc_pass
, ptr
, acc
->acc_passl
);
988 ptr
+= acc
->acc_passl
;
990 acc
->acc_accl
= *ptr
++;
991 memcpy(&acc
->acc_acc
, ptr
, acc
->acc_accl
);
993 skb_pull(skb
, acc
->acc_accl
+ acc
->acc_passl
+ acc
->acc_userl
+ 3);
997 static void dn_user_copy(struct sk_buff
*skb
, struct optdata_dn
*opt
)
999 unsigned char *ptr
= skb
->data
;
1001 opt
->opt_optl
= *ptr
++;
1002 opt
->opt_status
= 0;
1003 memcpy(opt
->opt_data
, ptr
, opt
->opt_optl
);
1004 skb_pull(skb
, opt
->opt_optl
+ 1);
1008 static struct sk_buff
*dn_wait_for_connect(struct sock
*sk
, long *timeo
)
1011 struct sk_buff
*skb
= NULL
;
1014 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
1017 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1019 *timeo
= schedule_timeout(*timeo
);
1020 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1026 if (sk
->sk_state
!= TCP_LISTEN
)
1028 err
= sock_intr_errno(*timeo
);
1029 if (signal_pending(current
))
1034 prepare_to_wait(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
1036 finish_wait(sk
->sk_sleep
, &wait
);
1038 return skb
== NULL
? ERR_PTR(err
) : skb
;
1041 static int dn_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1043 struct sock
*sk
= sock
->sk
, *newsk
;
1044 struct sk_buff
*skb
= NULL
;
1045 struct dn_skb_cb
*cb
;
1046 unsigned char menuver
;
1049 long timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1053 if (sk
->sk_state
!= TCP_LISTEN
|| DN_SK(sk
)->state
!= DN_O
) {
1058 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1060 skb
= dn_wait_for_connect(sk
, &timeo
);
1063 return PTR_ERR(skb
);
1067 cb
= DN_SKB_CB(skb
);
1068 sk
->sk_ack_backlog
--;
1069 newsk
= dn_alloc_sock(newsock
, sk
->sk_allocation
);
1070 if (newsk
== NULL
) {
1077 dst_release(xchg(&newsk
->sk_dst_cache
, skb
->dst
));
1080 DN_SK(newsk
)->state
= DN_CR
;
1081 DN_SK(newsk
)->addrrem
= cb
->src_port
;
1082 DN_SK(newsk
)->services_rem
= cb
->services
;
1083 DN_SK(newsk
)->info_rem
= cb
->info
;
1084 DN_SK(newsk
)->segsize_rem
= cb
->segsize
;
1085 DN_SK(newsk
)->accept_mode
= DN_SK(sk
)->accept_mode
;
1087 if (DN_SK(newsk
)->segsize_rem
< 230)
1088 DN_SK(newsk
)->segsize_rem
= 230;
1090 if ((DN_SK(newsk
)->services_rem
& NSP_FC_MASK
) == NSP_FC_NONE
)
1091 DN_SK(newsk
)->max_window
= decnet_no_fc_max_cwnd
;
1093 newsk
->sk_state
= TCP_LISTEN
;
1094 memcpy(&(DN_SK(newsk
)->addr
), &(DN_SK(sk
)->addr
), sizeof(struct sockaddr_dn
));
1097 * If we are listening on a wild socket, we don't want
1098 * the newly created socket on the wrong hash queue.
1100 DN_SK(newsk
)->addr
.sdn_flags
&= ~SDF_WILD
;
1102 skb_pull(skb
, dn_username2sockaddr(skb
->data
, skb
->len
, &(DN_SK(newsk
)->addr
), &type
));
1103 skb_pull(skb
, dn_username2sockaddr(skb
->data
, skb
->len
, &(DN_SK(newsk
)->peer
), &type
));
1104 *(dn_address
*)(DN_SK(newsk
)->peer
.sdn_add
.a_addr
) = cb
->src
;
1105 *(dn_address
*)(DN_SK(newsk
)->addr
.sdn_add
.a_addr
) = cb
->dst
;
1107 menuver
= *skb
->data
;
1110 if (menuver
& DN_MENUVER_ACC
)
1111 dn_access_copy(skb
, &(DN_SK(newsk
)->accessdata
));
1113 if (menuver
& DN_MENUVER_USR
)
1114 dn_user_copy(skb
, &(DN_SK(newsk
)->conndata_in
));
1116 if (menuver
& DN_MENUVER_PRX
)
1117 DN_SK(newsk
)->peer
.sdn_flags
|= SDF_PROXY
;
1119 if (menuver
& DN_MENUVER_UIC
)
1120 DN_SK(newsk
)->peer
.sdn_flags
|= SDF_UICPROXY
;
1124 memcpy(&(DN_SK(newsk
)->conndata_out
), &(DN_SK(sk
)->conndata_out
),
1125 sizeof(struct optdata_dn
));
1126 memcpy(&(DN_SK(newsk
)->discdata_out
), &(DN_SK(sk
)->discdata_out
),
1127 sizeof(struct optdata_dn
));
1130 err
= dn_hash_sock(newsk
);
1132 sock_reset_flag(newsk
, SOCK_ZAPPED
);
1133 dn_send_conn_ack(newsk
);
1136 * Here we use sk->sk_allocation since although the conn conf is
1137 * for the newsk, the context is the old socket.
1139 if (DN_SK(newsk
)->accept_mode
== ACC_IMMED
)
1140 err
= dn_confirm_accept(newsk
, &timeo
,
1143 release_sock(newsk
);
1148 static int dn_getname(struct socket
*sock
, struct sockaddr
*uaddr
,int *uaddr_len
,int peer
)
1150 struct sockaddr_dn
*sa
= (struct sockaddr_dn
*)uaddr
;
1151 struct sock
*sk
= sock
->sk
;
1152 struct dn_scp
*scp
= DN_SK(sk
);
1154 *uaddr_len
= sizeof(struct sockaddr_dn
);
1159 if ((sock
->state
!= SS_CONNECTED
&&
1160 sock
->state
!= SS_CONNECTING
) &&
1161 scp
->accept_mode
== ACC_IMMED
)
1164 memcpy(sa
, &scp
->peer
, sizeof(struct sockaddr_dn
));
1166 memcpy(sa
, &scp
->addr
, sizeof(struct sockaddr_dn
));
1175 static unsigned int dn_poll(struct file
*file
, struct socket
*sock
, poll_table
*wait
)
1177 struct sock
*sk
= sock
->sk
;
1178 struct dn_scp
*scp
= DN_SK(sk
);
1179 int mask
= datagram_poll(file
, sock
, wait
);
1181 if (!skb_queue_empty(&scp
->other_receive_queue
))
1187 static int dn_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
1189 struct sock
*sk
= sock
->sk
;
1190 struct dn_scp
*scp
= DN_SK(sk
);
1191 int err
= -EOPNOTSUPP
;
1193 struct sk_buff
*skb
;
1200 return dn_dev_ioctl(cmd
, (void __user
*)arg
);
1204 val
= !skb_queue_empty(&scp
->other_receive_queue
);
1205 if (scp
->state
!= DN_RUN
)
1211 amount
= sk
->sk_sndbuf
- atomic_read(&sk
->sk_wmem_alloc
);
1214 err
= put_user(amount
, (int __user
*)arg
);
1219 if ((skb
= skb_peek(&scp
->other_receive_queue
)) != NULL
) {
1222 struct sk_buff
*skb
= sk
->sk_receive_queue
.next
;
1225 (struct sk_buff
*)&sk
->sk_receive_queue
)
1232 err
= put_user(amount
, (int __user
*)arg
);
1236 err
= dev_ioctl(cmd
, (void __user
*)arg
);
1243 static int dn_listen(struct socket
*sock
, int backlog
)
1245 struct sock
*sk
= sock
->sk
;
1250 if (sock_flag(sk
, SOCK_ZAPPED
))
1253 if ((DN_SK(sk
)->state
!= DN_O
) || (sk
->sk_state
== TCP_LISTEN
))
1256 sk
->sk_max_ack_backlog
= backlog
;
1257 sk
->sk_ack_backlog
= 0;
1258 sk
->sk_state
= TCP_LISTEN
;
1269 static int dn_shutdown(struct socket
*sock
, int how
)
1271 struct sock
*sk
= sock
->sk
;
1272 struct dn_scp
*scp
= DN_SK(sk
);
1273 int err
= -ENOTCONN
;
1277 if (sock
->state
== SS_UNCONNECTED
)
1281 if (sock
->state
== SS_DISCONNECTING
)
1285 if (scp
->state
== DN_O
)
1288 if (how
!= SHUTDOWN_MASK
)
1291 sk
->sk_shutdown
= how
;
1292 dn_destroy_sock(sk
);
1301 static int dn_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int optlen
)
1303 struct sock
*sk
= sock
->sk
;
1307 err
= __dn_setsockopt(sock
, level
, optname
, optval
, optlen
, 0);
1313 static int __dn_setsockopt(struct socket
*sock
, int level
,int optname
, char __user
*optval
, int optlen
, int flags
)
1315 struct sock
*sk
= sock
->sk
;
1316 struct dn_scp
*scp
= DN_SK(sk
);
1319 struct optdata_dn opt
;
1320 struct accessdata_dn acc
;
1324 unsigned char services
;
1329 if (optlen
&& !optval
)
1332 if (optlen
> sizeof(u
))
1335 if (copy_from_user(&u
, optval
, optlen
))
1340 if (sock
->state
== SS_CONNECTED
)
1342 if ((scp
->state
!= DN_O
) && (scp
->state
!= DN_CR
))
1345 if (optlen
!= sizeof(struct optdata_dn
))
1348 if (u
.opt
.opt_optl
> 16)
1351 memcpy(&scp
->conndata_out
, &u
.opt
, optlen
);
1355 if (sock
->state
!= SS_CONNECTED
&& scp
->accept_mode
== ACC_IMMED
)
1358 if (optlen
!= sizeof(struct optdata_dn
))
1361 if (u
.opt
.opt_optl
> 16)
1364 memcpy(&scp
->discdata_out
, &u
.opt
, optlen
);
1368 if (sock
->state
== SS_CONNECTED
)
1370 if (scp
->state
!= DN_O
)
1373 if (optlen
!= sizeof(struct accessdata_dn
))
1376 if ((u
.acc
.acc_accl
> DN_MAXACCL
) ||
1377 (u
.acc
.acc_passl
> DN_MAXACCL
) ||
1378 (u
.acc
.acc_userl
> DN_MAXACCL
))
1381 memcpy(&scp
->accessdata
, &u
.acc
, optlen
);
1384 case DSO_ACCEPTMODE
:
1385 if (sock
->state
== SS_CONNECTED
)
1387 if (scp
->state
!= DN_O
)
1390 if (optlen
!= sizeof(int))
1393 if ((u
.mode
!= ACC_IMMED
) && (u
.mode
!= ACC_DEFER
))
1396 scp
->accept_mode
= (unsigned char)u
.mode
;
1401 if (scp
->state
!= DN_CR
)
1403 timeo
= sock_rcvtimeo(sk
, 0);
1404 err
= dn_confirm_accept(sk
, &timeo
, sk
->sk_allocation
);
1409 if (scp
->state
!= DN_CR
)
1413 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1414 dn_nsp_send_disc(sk
, 0x38, 0, sk
->sk_allocation
);
1418 #ifdef CONFIG_NETFILTER
1419 return nf_setsockopt(sk
, PF_DECnet
, optname
, optval
, optlen
);
1424 return -ENOPROTOOPT
;
1427 if (optlen
!= sizeof(unsigned long))
1429 if (u
.win
> NSP_MAX_WINDOW
)
1430 u
.win
= NSP_MAX_WINDOW
;
1433 scp
->max_window
= u
.win
;
1434 if (scp
->snd_window
> u
.win
)
1435 scp
->snd_window
= u
.win
;
1439 if (optlen
!= sizeof(int))
1441 if (scp
->nonagle
== 2)
1443 scp
->nonagle
= (u
.val
== 0) ? 0 : 1;
1444 /* if (scp->nonagle == 1) { Push pending frames } */
1448 if (optlen
!= sizeof(int))
1450 if (scp
->nonagle
== 1)
1452 scp
->nonagle
= (u
.val
== 0) ? 0 : 2;
1453 /* if (scp->nonagle == 0) { Push pending frames } */
1457 if (optlen
!= sizeof(unsigned char))
1459 if ((u
.services
& ~NSP_FC_MASK
) != 0x01)
1461 if ((u
.services
& NSP_FC_MASK
) == NSP_FC_MASK
)
1463 scp
->services_loc
= u
.services
;
1467 if (optlen
!= sizeof(unsigned char))
1471 scp
->info_loc
= u
.info
;
1478 static int dn_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1480 struct sock
*sk
= sock
->sk
;
1484 err
= __dn_getsockopt(sock
, level
, optname
, optval
, optlen
, 0);
1490 static int __dn_getsockopt(struct socket
*sock
, int level
,int optname
, char __user
*optval
,int __user
*optlen
, int flags
)
1492 struct sock
*sk
= sock
->sk
;
1493 struct dn_scp
*scp
= DN_SK(sk
);
1494 struct linkinfo_dn link
;
1496 void *r_data
= NULL
;
1499 if(get_user(r_len
, optlen
))
1504 if (r_len
> sizeof(struct optdata_dn
))
1505 r_len
= sizeof(struct optdata_dn
);
1506 r_data
= &scp
->conndata_in
;
1510 if (r_len
> sizeof(struct optdata_dn
))
1511 r_len
= sizeof(struct optdata_dn
);
1512 r_data
= &scp
->discdata_in
;
1516 if (r_len
> sizeof(struct accessdata_dn
))
1517 r_len
= sizeof(struct accessdata_dn
);
1518 r_data
= &scp
->accessdata
;
1521 case DSO_ACCEPTMODE
:
1522 if (r_len
> sizeof(unsigned char))
1523 r_len
= sizeof(unsigned char);
1524 r_data
= &scp
->accept_mode
;
1528 if (r_len
> sizeof(struct linkinfo_dn
))
1529 r_len
= sizeof(struct linkinfo_dn
);
1531 switch(sock
->state
) {
1533 link
.idn_linkstate
= LL_CONNECTING
;
1535 case SS_DISCONNECTING
:
1536 link
.idn_linkstate
= LL_DISCONNECTING
;
1539 link
.idn_linkstate
= LL_RUNNING
;
1542 link
.idn_linkstate
= LL_INACTIVE
;
1545 link
.idn_segsize
= scp
->segsize_rem
;
1550 #ifdef CONFIG_NETFILTER
1554 if(get_user(len
, optlen
))
1557 val
= nf_getsockopt(sk
, PF_DECnet
, optname
,
1560 val
= put_user(len
, optlen
);
1568 return -ENOPROTOOPT
;
1571 if (r_len
> sizeof(unsigned long))
1572 r_len
= sizeof(unsigned long);
1573 r_data
= &scp
->max_window
;
1577 if (r_len
> sizeof(int))
1578 r_len
= sizeof(int);
1579 val
= (scp
->nonagle
== 1);
1584 if (r_len
> sizeof(int))
1585 r_len
= sizeof(int);
1586 val
= (scp
->nonagle
== 2);
1591 if (r_len
> sizeof(unsigned char))
1592 r_len
= sizeof(unsigned char);
1593 r_data
= &scp
->services_rem
;
1597 if (r_len
> sizeof(unsigned char))
1598 r_len
= sizeof(unsigned char);
1599 r_data
= &scp
->info_rem
;
1604 if (copy_to_user(optval
, r_data
, r_len
))
1606 if (put_user(r_len
, optlen
))
1614 static int dn_data_ready(struct sock
*sk
, struct sk_buff_head
*q
, int flags
, int target
)
1616 struct sk_buff
*skb
= q
->next
;
1619 if (flags
& MSG_OOB
)
1620 return !skb_queue_empty(q
) ? 1 : 0;
1622 while(skb
!= (struct sk_buff
*)q
) {
1623 struct dn_skb_cb
*cb
= DN_SKB_CB(skb
);
1626 if (cb
->nsp_flags
& 0x40) {
1627 /* SOCK_SEQPACKET reads to EOM */
1628 if (sk
->sk_type
== SOCK_SEQPACKET
)
1630 /* so does SOCK_STREAM unless WAITALL is specified */
1631 if (!(flags
& MSG_WAITALL
))
1635 /* minimum data length for read exceeded */
/*
 * dn_recvmsg - receive data from a DECnet socket.
 * Waits (subject to MSG_DONTWAIT and the socket rcv timeout) until
 * dn_data_ready() reports readable data, then copies queued skbs to the
 * user iovec, honouring MSG_PEEK/MSG_OOB/MSG_WAITALL semantics.
 * NOTE(review): many original lines are elided in this extract; comments
 * are limited to what the visible code demonstrates.
 */
1646 static int dn_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
1647 struct msghdr
*msg
, size_t size
, int flags
)
1649 struct sock
*sk
= sock
->sk
;
1650 struct dn_scp
*scp
= DN_SK(sk
);
1651 struct sk_buff_head
*queue
= &sk
->sk_receive_queue
;
/* target: minimum bytes before a read may complete (1 unless size==0). */
1652 size_t target
= size
> 1 ? 1 : 0;
1655 struct sk_buff
*skb
, *nskb
;
1656 struct dn_skb_cb
*cb
= NULL
;
1657 unsigned char eor
= 0;
1658 long timeo
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
/* A zapped socket has no usable local address. */
1662 if (sock_flag(sk
, SOCK_ZAPPED
)) {
1663 rv
= -EADDRNOTAVAIL
;
1667 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
1672 rv
= dn_check_state(sk
, NULL
, 0, &timeo
, flags
);
/* Reject any flag outside the supported set. */
1676 if (flags
& ~(MSG_PEEK
|MSG_OOB
|MSG_WAITALL
|MSG_DONTWAIT
|MSG_NOSIGNAL
)) {
/* OOB reads are served from the other-data receive queue. */
1681 if (flags
& MSG_OOB
)
1682 queue
= &scp
->other_receive_queue
;
1684 if (flags
& MSG_WAITALL
)
1689 * See if there is data ready to read, sleep if there isn't
1695 if (!skb_queue_empty(&scp
->other_receive_queue
)) {
/* Pending OOB data on a normal read: flag it once via MSG_OOB. */
1696 if (!(flags
& MSG_OOB
)) {
1697 msg
->msg_flags
|= MSG_OOB
;
1698 if (!scp
->other_report
) {
1699 scp
->other_report
= 1;
1705 if (scp
->state
!= DN_RUN
)
1708 if (signal_pending(current
)) {
1709 rv
= sock_intr_errno(timeo
);
1713 if (dn_data_ready(sk
, queue
, flags
, target
))
1716 if (flags
& MSG_DONTWAIT
) {
1721 set_bit(SOCK_ASYNC_WAITDATA
, &sock
->flags
);
1724 if (!dn_data_ready(sk
, queue
, flags
, target
))
1728 clear_bit(SOCK_ASYNC_WAITDATA
, &sock
->flags
);
/* Copy loop: drain queued skbs into the user iovec. */
1731 for(skb
= queue
->next
; skb
!= (struct sk_buff
*)queue
; skb
= nskb
) {
1732 unsigned int chunk
= skb
->len
;
1733 cb
= DN_SKB_CB(skb
);
/* Clamp the chunk so we never copy past the caller's buffer. */
1735 if ((chunk
+ copied
) > size
)
1736 chunk
= size
- copied
;
1738 if (memcpy_toiovec(msg
->msg_iov
, skb
->data
, chunk
)) {
/* Only consume data when not peeking. */
1744 if (!(flags
& MSG_PEEK
))
1745 skb_pull(skb
, chunk
);
1747 eor
= cb
->nsp_flags
& 0x40;
1750 if (skb
->len
== 0) {
1751 skb_unlink(skb
, queue
);
1754 * N.B. Don't refer to skb or cb after this point
/* Re-open local flow control if we had stopped and are no longer congested. */
1757 if ((scp
->flowloc_sw
== DN_DONTSEND
) && !dn_congested(sk
)) {
1758 scp
->flowloc_sw
= DN_SEND
;
1759 dn_nsp_send_link(sk
, DN_SEND
, 0);
1764 if (sk
->sk_type
== SOCK_SEQPACKET
)
1766 if (!(flags
& MSG_WAITALL
))
1770 if (flags
& MSG_OOB
)
1773 if (copied
>= target
)
/* Report end-of-record to SEQPACKET callers. */
1780 if (eor
&& (sk
->sk_type
== SOCK_SEQPACKET
))
1781 msg
->msg_flags
|= MSG_EOR
;
/* MSG_PEEK must not clear sk_err, so read it directly. */
1785 rv
= (flags
& MSG_PEEK
) ? -sk
->sk_err
: sock_error(sk
);
/* On success, report the peer address if the caller asked for it. */
1787 if ((rv
>= 0) && msg
->msg_name
) {
1788 memcpy(msg
->msg_name
, &scp
->peer
, sizeof(struct sockaddr_dn
));
1789 msg
->msg_namelen
= sizeof(struct sockaddr_dn
);
/*
 * dn_queue_too_long - true when the transmit @queue may not accept more
 * data: either the send window is full, or (with remote flow control in
 * effect) the appropriate remote flow-control count has reached zero.
 * NOTE(review): the return statements are elided in this extract.
 */
1798 static inline int dn_queue_too_long(struct dn_scp
*scp
, struct sk_buff_head
*queue
, int flags
)
1800 unsigned char fctype
= scp
->services_rem
& NSP_FC_MASK
;
1801 if (skb_queue_len(queue
) >= scp
->snd_window
)
1803 if (fctype
!= NSP_FC_NONE
) {
/* OOB traffic is governed by the other-data flow count... */
1804 if (flags
& MSG_OOB
) {
1805 if (scp
->flowrem_oth
== 0)
/* ...normal traffic by the data flow count. */
1808 if (scp
->flowrem_dat
== 0)
1816 * The DECnet spec requires that the "routing layer" accepts packets which
1817 * are at least 230 bytes in size. This excludes any headers which the NSP
1818 * layer might add, so we always assume that we'll be using the maximal
1819 * length header on data packets. The variation in length is due to the
1820 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1821 * make much practical difference.
/*
 * dn_mss_from_pmtu - derive the NSP data MSS from a device/path MTU.
 * Starts from the spec-mandated 230-byte minimum minus the maximal NSP
 * data header; when a device is known, subtracts its link-layer reserve
 * and header sizes instead. Some branches are elided in this extract.
 */
1823 unsigned dn_mss_from_pmtu(struct net_device
*dev
, int mtu
)
/* Default: spec minimum (230) less the largest NSP data header. */
1825 unsigned mss
= 230 - DN_MAX_NSP_DATA_HEADER
;
1827 struct dn_dev
*dn_db
= dev
->dn_ptr
;
1828 mtu
-= LL_RESERVED_SPACE(dev
);
1829 if (dn_db
->use_long
)
1833 mtu
-= DN_MAX_NSP_DATA_HEADER
;
1836 * 21 = long header, 16 = guess at MAC header length
1838 mtu
-= (21 + DN_MAX_NSP_DATA_HEADER
+ 16);
/*
 * dn_current_mss - current maximum segment size for this socket.
 * Takes the smaller of the negotiated local and remote segment sizes,
 * then further caps it by the path MTU when a dst entry is cached.
 */
1845 static inline unsigned int dn_current_mss(struct sock
*sk
, int flags
)
1847 struct dst_entry
*dst
= __sk_dst_get(sk
);
1848 struct dn_scp
*scp
= DN_SK(sk
);
1849 int mss_now
= min_t(int, scp
->segsize_loc
, scp
->segsize_rem
);
1851 /* Other data messages are limited to 16 bytes per packet */
1852 if (flags
& MSG_OOB
)
1855 /* This works out the maximum size of segment we can send out */
1857 u32 mtu
= dst_mtu(dst
);
1858 mss_now
= min_t(int, dn_mss_from_pmtu(dst
->dev
, mtu
), mss_now
);
1865 * N.B. We get the timeout wrong here, but then we always did get it
1866 * wrong before and this is another step along the road to correcting
1867 * it. It ought to get updated each time we pass through the routine,
1868 * but in practice it probably doesn't matter too much for now.
/*
 * dn_alloc_send_pskb - allocate an outgoing skb of @datalen bytes
 * (blocking behaviour controlled by @noblock) and stamp it with the
 * DECnet routing protocol and PACKET_OUTGOING type.
 */
1870 static inline struct sk_buff
*dn_alloc_send_pskb(struct sock
*sk
,
1871 unsigned long datalen
, int noblock
,
1874 struct sk_buff
*skb
= sock_alloc_send_skb(sk
, datalen
,
1877 skb
->protocol
= __constant_htons(ETH_P_DNA_RT
);
1878 skb
->pkt_type
= PACKET_OUTGOING
;
/*
 * dn_sendmsg - send data on a DECnet socket.
 * Segments the user iovec into MSS-sized skbs, sets NSP message flags
 * (begin/end-of-message, OOB), applies flow-control accounting, and
 * hands each skb to dn_nsp_queue_xmit(). Returns bytes sent, or an
 * error when nothing was sent. Many original lines are elided in this
 * extract; comments cover only the visible code.
 */
1883 static int dn_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
1884 struct msghdr
*msg
, size_t size
)
1886 struct sock
*sk
= sock
->sk
;
1887 struct dn_scp
*scp
= DN_SK(sk
);
1889 struct sk_buff_head
*queue
= &scp
->data_xmit_queue
;
1890 int flags
= msg
->msg_flags
;
1893 int addr_len
= msg
->msg_namelen
;
1894 struct sockaddr_dn
*addr
= (struct sockaddr_dn
*)msg
->msg_name
;
1895 struct sk_buff
*skb
= NULL
;
1896 struct dn_skb_cb
*cb
;
1898 unsigned char fctype
;
/* Reject any flag outside the supported set. */
1901 if (flags
& ~(MSG_TRYHARD
|MSG_OOB
|MSG_DONTWAIT
|MSG_EOR
|MSG_NOSIGNAL
|MSG_MORE
|MSG_CMSG_COMPAT
))
/* A destination address, if given, must be a full sockaddr_dn. */
1904 if (addr_len
&& (addr_len
!= sizeof(struct sockaddr_dn
)))
1908 timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
1910 * The only difference between stream sockets and sequenced packet
1911 * sockets is that the stream sockets always behave as if MSG_EOR
1914 if (sock
->type
== SOCK_STREAM
) {
1915 if (flags
& MSG_EOR
) {
1923 err
= dn_check_state(sk
, addr
, addr_len
, &timeo
, flags
);
/* Writing after shutdown raises SIGPIPE unless suppressed. */
1927 if (sk
->sk_shutdown
& SEND_SHUTDOWN
) {
1929 if (!(flags
& MSG_NOSIGNAL
))
1930 send_sig(SIGPIPE
, current
, 0);
/* MSG_TRYHARD: invalidate the cached route so it is re-looked-up. */
1934 if ((flags
& MSG_TRYHARD
) && sk
->sk_dst_cache
)
1935 dst_negative_advice(&sk
->sk_dst_cache
);
1937 mss
= scp
->segsize_rem
;
1938 fctype
= scp
->services_rem
& NSP_FC_MASK
;
1940 mss
= dn_current_mss(sk
, flags
);
/* OOB data goes on the other-data transmit queue. */
1942 if (flags
& MSG_OOB
) {
1943 queue
= &scp
->other_xmit_queue
;
1950 scp
->persist_fxn
= dn_nsp_xmit_timeout
;
/* Main segmentation loop: one skb per iteration until all sent. */
1952 while(sent
< size
) {
1953 err
= sock_error(sk
);
1957 if (signal_pending(current
)) {
1958 err
= sock_intr_errno(timeo
);
1963 * Calculate size that we wish to send.
1971 * Wait for queue size to go down below the window
1974 if (dn_queue_too_long(scp
, queue
, flags
)) {
1975 if (flags
& MSG_DONTWAIT
) {
1982 if (dn_queue_too_long(scp
, queue
, flags
))
1991 * Get a suitably sized skb.
1992 * 64 is a bit of a hack really, but its larger than any
1993 * link-layer headers and has served us well as a good
1994 * guess as to their real length.
1996 skb
= dn_alloc_send_pskb(sk
, len
+ 64 + DN_MAX_NSP_DATA_HEADER
,
1997 flags
& MSG_DONTWAIT
, &err
);
2005 cb
= DN_SKB_CB(skb
);
/* Reserve headroom for link-layer + NSP headers filled in later. */
2007 skb_reserve(skb
, 64 + DN_MAX_NSP_DATA_HEADER
);
2009 if (memcpy_fromiovec(skb_put(skb
, len
), msg
->msg_iov
, len
)) {
/* 0x30 = other-data (OOB) message type in nsp_flags. */
2014 if (flags
& MSG_OOB
) {
2015 cb
->nsp_flags
= 0x30;
2016 if (fctype
!= NSP_FC_NONE
)
2019 cb
->nsp_flags
= 0x00;
/* 0x20 marks the first segment of a message (begin-of-message). */
2020 if (scp
->seg_total
== 0)
2021 cb
->nsp_flags
|= 0x20;
2023 scp
->seg_total
+= len
;
/* 0x40 marks the final segment when the caller requested MSG_EOR. */
2025 if (((sent
+ len
) == size
) && (flags
& MSG_EOR
)) {
2026 cb
->nsp_flags
|= 0x40;
2028 if (fctype
== NSP_FC_SCMC
)
2031 if (fctype
== NSP_FC_SRC
)
2036 dn_nsp_queue_xmit(sk
, skb
, sk
->sk_allocation
, flags
& MSG_OOB
);
2039 scp
->persist
= dn_nsp_persist(sk
);
/* Partial success still reports the byte count, not the error. */
2049 return sent
? sent
: err
;
2052 err
= sk_stream_error(sk
, flags
, err
);
/*
 * dn_device_event - netdevice notifier callback for DECnet.
 * NOTE(review): the event handling body is elided in this extract;
 * only the ptr-to-net_device cast is visible.
 */
2057 static int dn_device_event(struct notifier_block
*this, unsigned long event
,
2060 struct net_device
*dev
= (struct net_device
*)ptr
;
/* Notifier block registering dn_device_event for netdevice events. */
2076 static struct notifier_block dn_dev_notifier
= {
2077 .notifier_call
= dn_device_event
,
/* Receive handler for DECnet routing frames, defined in dn_route.c. */
2080 extern int dn_route_rcv(struct sk_buff
*, struct net_device
*, struct packet_type
*, struct net_device
*);
/* Packet type hooking ETH_P_DNA_RT frames into dn_route_rcv(). */
2082 static struct packet_type dn_dix_packet_type
= {
2083 .type
= __constant_htons(ETH_P_DNA_RT
),
2084 .dev
= NULL
, /* All devices */
2085 .func
= dn_route_rcv
,
2088 #ifdef CONFIG_PROC_FS
/* Iterator state for the /proc/net/decnet seq_file walk (fields elided). */
2089 struct dn_iter_state
{
/*
 * dn_socket_get_first - find the first socket in the DECnet hash table,
 * scanning buckets from 0 upward; returns NULL when all are empty.
 */
2093 static struct sock
*dn_socket_get_first(struct seq_file
*seq
)
2095 struct dn_iter_state
*state
= seq
->private;
2096 struct sock
*n
= NULL
;
2098 for(state
->bucket
= 0;
2099 state
->bucket
< DN_SK_HASH_SIZE
;
2101 n
= sk_head(&dn_sk_hash
[state
->bucket
]);
/*
 * dn_socket_get_next - advance the /proc iterator to the next socket,
 * moving to the next hash bucket when the current chain is exhausted.
 */
2109 static struct sock
*dn_socket_get_next(struct seq_file
*seq
,
2112 struct dn_iter_state
*state
= seq
->private;
2118 if (++state
->bucket
>= DN_SK_HASH_SIZE
)
2120 n
= sk_head(&dn_sk_hash
[state
->bucket
]);
/*
 * socket_get_idx - return the socket at position *pos in iteration
 * order, or NULL if the table has fewer entries.
 */
2126 static struct sock
*socket_get_idx(struct seq_file
*seq
, loff_t
*pos
)
2128 struct sock
*sk
= dn_socket_get_first(seq
);
2131 while(*pos
&& (sk
= dn_socket_get_next(seq
, sk
)))
2134 return *pos
? NULL
: sk
;
/*
 * dn_socket_get_idx - locked lookup of the socket at index @pos.
 * Takes dn_hash_lock for reading; the visible unlock appears to cover
 * the not-found path (lines elided — confirm against full source).
 */
2137 static void *dn_socket_get_idx(struct seq_file
*seq
, loff_t pos
)
2140 read_lock_bh(&dn_hash_lock
);
2141 rc
= socket_get_idx(seq
, &pos
);
2143 read_unlock_bh(&dn_hash_lock
);
/* seq_file .start: SEQ_START_TOKEN first, then the (pos-1)'th socket. */
2148 static void *dn_socket_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2150 return *pos
? dn_socket_get_idx(seq
, *pos
- 1) : SEQ_START_TOKEN
;
/*
 * seq_file .next: after the start token, fetch the first socket;
 * otherwise advance from the current one. The visible unlock appears to
 * run when iteration ends (lines elided — confirm against full source).
 */
2153 static void *dn_socket_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2157 if (v
== SEQ_START_TOKEN
) {
2158 rc
= dn_socket_get_idx(seq
, 0);
2162 rc
= dn_socket_get_next(seq
, v
);
2165 read_unlock_bh(&dn_hash_lock
);
/* seq_file .stop: drop dn_hash_lock, but only if a real entry was held. */
2171 static void dn_socket_seq_stop(struct seq_file
*seq
, void *v
)
2173 if (v
&& v
!= SEQ_START_TOKEN
)
2174 read_unlock_bh(&dn_hash_lock
);
/* True for bytes outside the printable ASCII range 32..126. */
2177 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
/*
 * dn_printable_object - render a DECnet object name (or, per the visible
 * switch, its number when the name length selects that case) into @buf.
 * Non-printable name bytes are detected via IS_NOT_PRINTABLE; the
 * replacement action is elided in this extract.
 */
2179 static void dn_printable_object(struct sockaddr_dn
*dn
, unsigned char *buf
)
2183 switch (dn_ntohs(dn
->sdn_objnamel
)) {
2185 sprintf(buf
, "%d", dn
->sdn_objnum
);
2188 for (i
= 0; i
< dn_ntohs(dn
->sdn_objnamel
); i
++) {
2189 buf
[i
] = dn
->sdn_objname
[i
];
2190 if (IS_NOT_PRINTABLE(buf
[i
]))
/* Maps a DECnet socket state code to a printable string (body elided in this extract). */
2197 static char *dn_state2asc(unsigned char state
)
/*
 * dn_socket_format_entry - print one socket's line in /proc/net/decnet:
 * local and remote address/object pairs, state, and accept mode.
 */
2237 static inline void dn_socket_format_entry(struct seq_file
*seq
, struct sock
*sk
)
2239 struct dn_scp
*scp
= DN_SK(sk
);
2240 char buf1
[DN_ASCBUF_LEN
];
2241 char buf2
[DN_ASCBUF_LEN
];
2242 char local_object
[DN_MAXOBJL
+3];
2243 char remote_object
[DN_MAXOBJL
+3];
2245 dn_printable_object(&scp
->addr
, local_object
);
2246 dn_printable_object(&scp
->peer
, remote_object
);
2249 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2250 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2251 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp
->addr
)), buf1
),
2259 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp
->peer
)), buf2
),
2267 dn_state2asc(scp
->state
),
2268 ((scp
->accept_mode
== ACC_IMMED
) ? "IMMED" : "DEFER"));
/* seq_file .show: header line for the start token, else one socket entry. */
2271 static int dn_socket_seq_show(struct seq_file
*seq
, void *v
)
2273 if (v
== SEQ_START_TOKEN
) {
2274 seq_puts(seq
, "Local Remote\n");
2276 dn_socket_format_entry(seq
, v
);
/* seq_file iterator operations for /proc/net/decnet. */
2281 static struct seq_operations dn_socket_seq_ops
= {
2282 .start
= dn_socket_seq_start
,
2283 .next
= dn_socket_seq_next
,
2284 .stop
= dn_socket_seq_stop
,
2285 .show
= dn_socket_seq_show
,
/*
 * dn_socket_seq_open - open /proc/net/decnet: allocate and zero a
 * dn_iter_state and attach it to the seq_file (error paths elided).
 */
2288 static int dn_socket_seq_open(struct inode
*inode
, struct file
*file
)
2290 struct seq_file
*seq
;
2292 struct dn_iter_state
*s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
2297 rc
= seq_open(file
, &dn_socket_seq_ops
);
2301 seq
= file
->private_data
;
2303 memset(s
, 0, sizeof(*s
));
/* File operations for /proc/net/decnet; release frees the private state. */
2311 static struct file_operations dn_socket_seq_fops
= {
2312 .owner
= THIS_MODULE
,
2313 .open
= dn_socket_seq_open
,
2315 .llseek
= seq_lseek
,
2316 .release
= seq_release_private
,
/* AF_DECnet protocol family registration: dn_create builds new sockets. */
2320 static struct net_proto_family dn_family_ops
= {
2321 .family
= AF_DECnet
,
2322 .create
= dn_create
,
2323 .owner
= THIS_MODULE
,
/*
 * Socket-layer operations for AF_DECnet. Unsupported calls are routed
 * to the sock_no_* stubs; some slots are elided in this extract.
 */
2326 static struct proto_ops dn_proto_ops
= {
2327 .family
= AF_DECnet
,
2328 .owner
= THIS_MODULE
,
2329 .release
= dn_release
,
2331 .connect
= dn_connect
,
2332 .socketpair
= sock_no_socketpair
,
2333 .accept
= dn_accept
,
2334 .getname
= dn_getname
,
2337 .listen
= dn_listen
,
2338 .shutdown
= dn_shutdown
,
2339 .setsockopt
= dn_setsockopt
,
2340 .getsockopt
= dn_getsockopt
,
2341 .sendmsg
= dn_sendmsg
,
2342 .recvmsg
= dn_recvmsg
,
2343 .mmap
= sock_no_mmap
,
2344 .sendpage
= sock_no_sendpage
,
/* Sysctl registration hooks, defined elsewhere in the DECnet code. */
2347 void dn_register_sysctl(void);
2348 void dn_unregister_sysctl(void);
2350 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2351 MODULE_AUTHOR("Linux DECnet Project Team");
2352 MODULE_LICENSE("GPL");
2353 MODULE_ALIAS_NETPROTO(PF_DECnet
);
/* Boot/load banner, discarded after init (__initdata). */
2355 static char banner
[] __initdata
= KERN_INFO
"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
/*
 * decnet_init - module init: register the DECnet proto, socket family,
 * packet type, netdevice notifier, /proc entry and sysctls.
 * Error handling between these steps is elided in this extract.
 */
2357 static int __init
decnet_init(void)
2363 rc
= proto_register(&dn_proto
, 1);
2372 sock_register(&dn_family_ops
);
2373 dev_add_pack(&dn_dix_packet_type
);
2374 register_netdevice_notifier(&dn_dev_notifier
);
2376 proc_net_fops_create("decnet", S_IRUGO
, &dn_socket_seq_fops
);
2377 dn_register_sysctl();
2382 module_init(decnet_init
);
2385 * Prevent DECnet module unloading until it's fixed properly.
2386 * Requires an audit of the code to check for memory leaks and
2387 * initialisation problems etc.
/*
 * decnet_exit - module exit: undo decnet_init's registrations in
 * reverse-ish order (some intervening lines are elided in this extract).
 */
2390 static void __exit
decnet_exit(void)
2392 sock_unregister(AF_DECnet
);
2393 dev_remove_pack(&dn_dix_packet_type
);
2395 dn_unregister_sysctl();
2397 unregister_netdevice_notifier(&dn_dev_notifier
);
2404 proc_net_remove("decnet");
2406 proto_unregister(&dn_proto
);
2408 module_exit(decnet_exit
);