1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * DECnet An implementation of the DECnet protocol suite for the LINUX
5 * operating system. DECnet is implemented using the BSD Socket
6 * interface as the means of communication with the user level.
8 * DECnet Socket Layer Interface
10 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
11 * Patrick Caulfield <patrick@pandh.demon.co.uk>
14 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
15 * version of the code. Original copyright preserved
17 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
18 * compatible with my routing layer.
19 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
21 * Steve Whitehouse: Further bug fixes, checking module code still works
22 * with new routing layer.
23 * Steve Whitehouse: Additional set/get_sockopt() calls.
24 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
26 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
27 * way. Didn't manage it entirely, but its better.
28 * Steve Whitehouse: ditto for sendmsg().
29 * Steve Whitehouse: A selection of bug fixes to various things.
30 * Steve Whitehouse: Added TIOCOUTQ ioctl.
31 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
32 * Steve Whitehouse: Fixes to connect() error returns.
33 * Patrick Caulfield: Fixes to delayed acceptance logic.
34 * David S. Miller: New socket locking
35 * Steve Whitehouse: Socket list hashing/locking
36 * Arnaldo C. Melo: use capable, not suser
37 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
39 * Patrick Caulfield: /proc/net/decnet now has object name/number
40 * Steve Whitehouse: Fixed local port allocation, hashed sk list
41 * Matthew Wilcox: Fixes for dn_ioctl()
42 * Steve Whitehouse: New connect/accept logic to allow timeouts and
43 * prepare for sendpage etc.
47 /******************************************************************************
48 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
53 Version Kernel Date Author/Comments
54 ------- ------ ---- ---------------
55 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
56 (emserrat@geocities.com)
58 First Development of DECnet Socket La-
59 yer for Linux. Only supports outgoing
62 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
63 (patrick@pandh.demon.co.uk)
65 Port to new kernel development version.
67 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
68 (emserrat@geocities.com)
70 Added support for incoming connections
71 so we can start developing server apps
75 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
78 Added support for X11R6.4. Now we can
79 use DECnet transport for X on Linux!!!
81 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
82 (emserrat@geocities.com)
83 Removed bugs on flow control
84 Removed bugs on incoming accessdata
87 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
92 *******************************************************************************/
94 #include <linux/module.h>
95 #include <linux/errno.h>
96 #include <linux/types.h>
97 #include <linux/slab.h>
98 #include <linux/socket.h>
100 #include <linux/kernel.h>
101 #include <linux/sched/signal.h>
102 #include <linux/timer.h>
103 #include <linux/string.h>
104 #include <linux/sockios.h>
105 #include <linux/net.h>
106 #include <linux/netdevice.h>
107 #include <linux/inet.h>
108 #include <linux/route.h>
109 #include <linux/netfilter.h>
110 #include <linux/seq_file.h>
111 #include <net/sock.h>
112 #include <net/tcp_states.h>
113 #include <net/flow.h>
114 #include <asm/ioctls.h>
115 #include <linux/capability.h>
116 #include <linux/mm.h>
117 #include <linux/interrupt.h>
118 #include <linux/proc_fs.h>
119 #include <linux/stat.h>
120 #include <linux/init.h>
121 #include <linux/poll.h>
122 #include <linux/jiffies.h>
123 #include <net/net_namespace.h>
124 #include <net/neighbour.h>
126 #include <net/fib_rules.h>
129 #include <net/dn_nsp.h>
130 #include <net/dn_dev.h>
131 #include <net/dn_route.h>
132 #include <net/dn_fib.h>
133 #include <net/dn_neigh.h>
140 static void dn_keepalive(struct sock
*sk
);
142 #define DN_SK_HASH_SHIFT 8
143 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
144 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
147 static const struct proto_ops dn_proto_ops
;
148 static DEFINE_RWLOCK(dn_hash_lock
);
149 static struct hlist_head dn_sk_hash
[DN_SK_HASH_SIZE
];
150 static struct hlist_head dn_wild_sk
;
151 static atomic_long_t decnet_memory_allocated
;
153 static int __dn_setsockopt(struct socket
*sock
, int level
, int optname
,
154 sockptr_t optval
, unsigned int optlen
, int flags
);
155 static int __dn_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
, int flags
);
157 static struct hlist_head
*dn_find_list(struct sock
*sk
)
159 struct dn_scp
*scp
= DN_SK(sk
);
161 if (scp
->addr
.sdn_flags
& SDF_WILD
)
162 return hlist_empty(&dn_wild_sk
) ? &dn_wild_sk
: NULL
;
164 return &dn_sk_hash
[le16_to_cpu(scp
->addrloc
) & DN_SK_HASH_MASK
];
168 * Valid ports are those greater than zero and not already in use.
170 static int check_port(__le16 port
)
177 sk_for_each(sk
, &dn_sk_hash
[le16_to_cpu(port
) & DN_SK_HASH_MASK
]) {
178 struct dn_scp
*scp
= DN_SK(sk
);
179 if (scp
->addrloc
== port
)
185 static unsigned short port_alloc(struct sock
*sk
)
187 struct dn_scp
*scp
= DN_SK(sk
);
188 static unsigned short port
= 0x2000;
189 unsigned short i_port
= port
;
191 while(check_port(cpu_to_le16(++port
)) != 0) {
196 scp
->addrloc
= cpu_to_le16(port
);
202 * Since this is only ever called from user
203 * level, we don't need a write_lock() version
206 static int dn_hash_sock(struct sock
*sk
)
208 struct dn_scp
*scp
= DN_SK(sk
);
209 struct hlist_head
*list
;
212 BUG_ON(sk_hashed(sk
));
214 write_lock_bh(&dn_hash_lock
);
216 if (!scp
->addrloc
&& !port_alloc(sk
))
220 if ((list
= dn_find_list(sk
)) == NULL
)
223 sk_add_node(sk
, list
);
226 write_unlock_bh(&dn_hash_lock
);
230 static void dn_unhash_sock(struct sock
*sk
)
232 write_lock(&dn_hash_lock
);
233 sk_del_node_init(sk
);
234 write_unlock(&dn_hash_lock
);
237 static void dn_unhash_sock_bh(struct sock
*sk
)
239 write_lock_bh(&dn_hash_lock
);
240 sk_del_node_init(sk
);
241 write_unlock_bh(&dn_hash_lock
);
244 static struct hlist_head
*listen_hash(struct sockaddr_dn
*addr
)
247 unsigned int hash
= addr
->sdn_objnum
;
250 hash
= addr
->sdn_objnamel
;
251 for(i
= 0; i
< le16_to_cpu(addr
->sdn_objnamel
); i
++) {
252 hash
^= addr
->sdn_objname
[i
];
257 return &dn_sk_hash
[hash
& DN_SK_HASH_MASK
];
261 * Called to transform a socket from bound (i.e. with a local address)
262 * into a listening socket (doesn't need a local port number) and rehashes
263 * based upon the object name/number.
265 static void dn_rehash_sock(struct sock
*sk
)
267 struct hlist_head
*list
;
268 struct dn_scp
*scp
= DN_SK(sk
);
270 if (scp
->addr
.sdn_flags
& SDF_WILD
)
273 write_lock_bh(&dn_hash_lock
);
274 sk_del_node_init(sk
);
275 DN_SK(sk
)->addrloc
= 0;
276 list
= listen_hash(&DN_SK(sk
)->addr
);
277 sk_add_node(sk
, list
);
278 write_unlock_bh(&dn_hash_lock
);
281 int dn_sockaddr2username(struct sockaddr_dn
*sdn
, unsigned char *buf
, unsigned char type
)
289 *buf
++ = sdn
->sdn_objnum
;
293 *buf
++ = le16_to_cpu(sdn
->sdn_objnamel
);
294 memcpy(buf
, sdn
->sdn_objname
, le16_to_cpu(sdn
->sdn_objnamel
));
295 len
= 3 + le16_to_cpu(sdn
->sdn_objnamel
);
300 *buf
++ = le16_to_cpu(sdn
->sdn_objnamel
);
301 memcpy(buf
, sdn
->sdn_objname
, le16_to_cpu(sdn
->sdn_objnamel
));
302 len
= 7 + le16_to_cpu(sdn
->sdn_objnamel
);
310 * On reception of usernames, we handle types 1 and 0 for destination
311 * addresses only. Types 2 and 4 are used for source addresses, but the
312 * UIC, GIC are ignored and they are both treated the same way. Type 3
313 * is never used as I've no idea what its purpose might be or what its
316 int dn_username2sockaddr(unsigned char *data
, int len
, struct sockaddr_dn
*sdn
, unsigned char *fmt
)
323 sdn
->sdn_objnamel
= cpu_to_le16(0);
324 memset(sdn
->sdn_objname
, 0, DN_MAXOBJL
);
335 sdn
->sdn_objnum
= type
;
357 sdn
->sdn_objnamel
= cpu_to_le16(*data
++);
358 len
-= le16_to_cpu(sdn
->sdn_objnamel
);
360 if ((len
< 0) || (le16_to_cpu(sdn
->sdn_objnamel
) > namel
))
363 memcpy(sdn
->sdn_objname
, data
, le16_to_cpu(sdn
->sdn_objnamel
));
368 struct sock
*dn_sklist_find_listener(struct sockaddr_dn
*addr
)
370 struct hlist_head
*list
= listen_hash(addr
);
373 read_lock(&dn_hash_lock
);
374 sk_for_each(sk
, list
) {
375 struct dn_scp
*scp
= DN_SK(sk
);
376 if (sk
->sk_state
!= TCP_LISTEN
)
378 if (scp
->addr
.sdn_objnum
) {
379 if (scp
->addr
.sdn_objnum
!= addr
->sdn_objnum
)
382 if (addr
->sdn_objnum
)
384 if (scp
->addr
.sdn_objnamel
!= addr
->sdn_objnamel
)
386 if (memcmp(scp
->addr
.sdn_objname
, addr
->sdn_objname
, le16_to_cpu(addr
->sdn_objnamel
)) != 0)
390 read_unlock(&dn_hash_lock
);
394 sk
= sk_head(&dn_wild_sk
);
396 if (sk
->sk_state
== TCP_LISTEN
)
402 read_unlock(&dn_hash_lock
);
406 struct sock
*dn_find_by_skb(struct sk_buff
*skb
)
408 struct dn_skb_cb
*cb
= DN_SKB_CB(skb
);
412 read_lock(&dn_hash_lock
);
413 sk_for_each(sk
, &dn_sk_hash
[le16_to_cpu(cb
->dst_port
) & DN_SK_HASH_MASK
]) {
415 if (cb
->src
!= dn_saddr2dn(&scp
->peer
))
417 if (cb
->dst_port
!= scp
->addrloc
)
419 if (scp
->addrrem
&& (cb
->src_port
!= scp
->addrrem
))
426 read_unlock(&dn_hash_lock
);
432 static void dn_destruct(struct sock
*sk
)
434 struct dn_scp
*scp
= DN_SK(sk
);
436 skb_queue_purge(&scp
->data_xmit_queue
);
437 skb_queue_purge(&scp
->other_xmit_queue
);
438 skb_queue_purge(&scp
->other_receive_queue
);
440 dst_release(rcu_dereference_protected(sk
->sk_dst_cache
, 1));
static unsigned long dn_memory_pressure;

/* proto->enter_memory_pressure hook: latch the pressure flag once. */
static void dn_enter_memory_pressure(struct sock *sk)
{
	if (!dn_memory_pressure) {
		dn_memory_pressure = 1;
	}
}
452 static struct proto dn_proto
= {
454 .owner
= THIS_MODULE
,
455 .enter_memory_pressure
= dn_enter_memory_pressure
,
456 .memory_pressure
= &dn_memory_pressure
,
457 .memory_allocated
= &decnet_memory_allocated
,
458 .sysctl_mem
= sysctl_decnet_mem
,
459 .sysctl_wmem
= sysctl_decnet_wmem
,
460 .sysctl_rmem
= sysctl_decnet_rmem
,
461 .max_header
= DN_MAX_NSP_DATA_HEADER
+ 64,
462 .obj_size
= sizeof(struct dn_sock
),
465 static struct sock
*dn_alloc_sock(struct net
*net
, struct socket
*sock
, gfp_t gfp
, int kern
)
468 struct sock
*sk
= sk_alloc(net
, PF_DECnet
, gfp
, &dn_proto
, kern
);
474 sock
->ops
= &dn_proto_ops
;
475 sock_init_data(sock
, sk
);
477 sk
->sk_backlog_rcv
= dn_nsp_backlog_rcv
;
478 sk
->sk_destruct
= dn_destruct
;
479 sk
->sk_no_check_tx
= 1;
480 sk
->sk_family
= PF_DECnet
;
482 sk
->sk_allocation
= gfp
;
483 sk
->sk_sndbuf
= sysctl_decnet_wmem
[1];
484 sk
->sk_rcvbuf
= sysctl_decnet_rmem
[1];
486 /* Initialization of DECnet Session Control Port */
488 scp
->state
= DN_O
; /* Open */
489 scp
->numdat
= 1; /* Next data seg to tx */
490 scp
->numoth
= 1; /* Next oth data to tx */
491 scp
->ackxmt_dat
= 0; /* Last data seg ack'ed */
492 scp
->ackxmt_oth
= 0; /* Last oth data ack'ed */
493 scp
->ackrcv_dat
= 0; /* Highest data ack recv*/
494 scp
->ackrcv_oth
= 0; /* Last oth data ack rec*/
495 scp
->flowrem_sw
= DN_SEND
;
496 scp
->flowloc_sw
= DN_SEND
;
497 scp
->flowrem_dat
= 0;
498 scp
->flowrem_oth
= 1;
499 scp
->flowloc_dat
= 0;
500 scp
->flowloc_oth
= 1;
501 scp
->services_rem
= 0;
502 scp
->services_loc
= 1 | NSP_FC_NONE
;
504 scp
->info_loc
= 0x03; /* NSP version 4.1 */
505 scp
->segsize_rem
= 230 - DN_MAX_NSP_DATA_HEADER
; /* Default: Updated by remote segsize */
508 scp
->accept_mode
= ACC_IMMED
;
509 scp
->addr
.sdn_family
= AF_DECnet
;
510 scp
->peer
.sdn_family
= AF_DECnet
;
511 scp
->accessdata
.acc_accl
= 5;
512 memcpy(scp
->accessdata
.acc_acc
, "LINUX", 5);
514 scp
->max_window
= NSP_MAX_WINDOW
;
515 scp
->snd_window
= NSP_MIN_WINDOW
;
516 scp
->nsp_srtt
= NSP_INITIAL_SRTT
;
517 scp
->nsp_rttvar
= NSP_INITIAL_RTTVAR
;
518 scp
->nsp_rxtshift
= 0;
520 skb_queue_head_init(&scp
->data_xmit_queue
);
521 skb_queue_head_init(&scp
->other_xmit_queue
);
522 skb_queue_head_init(&scp
->other_receive_queue
);
525 scp
->persist_fxn
= NULL
;
526 scp
->keepalive
= 10 * HZ
;
527 scp
->keepalive_fxn
= dn_keepalive
;
529 dn_start_slow_timer(sk
);
536 * FIXME: Should respond to SO_KEEPALIVE etc.
538 static void dn_keepalive(struct sock
*sk
)
540 struct dn_scp
*scp
= DN_SK(sk
);
543 * By checking the other_data transmit queue is empty
544 * we are double checking that we are not sending too
545 * many of these keepalive frames.
547 if (skb_queue_empty(&scp
->other_xmit_queue
))
548 dn_nsp_send_link(sk
, DN_NOCHANGE
, 0);
553 * Timer for shutdown/destroyed sockets.
554 * When socket is dead & no packets have been sent for a
555 * certain amount of time, they are removed by this
556 * routine. Also takes care of sending out DI & DC
557 * frames at correct times.
559 int dn_destroy_timer(struct sock
*sk
)
561 struct dn_scp
*scp
= DN_SK(sk
);
563 scp
->persist
= dn_nsp_persist(sk
);
565 switch (scp
->state
) {
567 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, GFP_ATOMIC
);
568 if (scp
->nsp_rxtshift
>= decnet_di_count
)
573 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, GFP_ATOMIC
);
574 if (scp
->nsp_rxtshift
>= decnet_dr_count
)
579 if (scp
->nsp_rxtshift
< decnet_dn_count
) {
580 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
581 dn_nsp_send_disc(sk
, NSP_DISCCONF
, NSP_REASON_DC
,
587 scp
->persist
= (HZ
* decnet_time_wait
);
592 if (time_after_eq(jiffies
, scp
->stamp
+ HZ
* decnet_time_wait
)) {
601 static void dn_destroy_sock(struct sock
*sk
)
603 struct dn_scp
*scp
= DN_SK(sk
);
605 scp
->nsp_rxtshift
= 0; /* reset back off */
608 if (sk
->sk_socket
->state
!= SS_UNCONNECTED
)
609 sk
->sk_socket
->state
= SS_DISCONNECTING
;
612 sk
->sk_state
= TCP_CLOSE
;
614 switch (scp
->state
) {
616 dn_nsp_send_disc(sk
, NSP_DISCCONF
, NSP_REASON_DC
,
618 scp
->persist_fxn
= dn_destroy_timer
;
619 scp
->persist
= dn_nsp_persist(sk
);
630 dn_nsp_send_disc(sk
, NSP_DISCINIT
, 0, sk
->sk_allocation
);
640 scp
->persist_fxn
= dn_destroy_timer
;
641 scp
->persist
= dn_nsp_persist(sk
);
644 printk(KERN_DEBUG
"DECnet: dn_destroy_sock passed socket in invalid state\n");
647 dn_stop_slow_timer(sk
);
649 dn_unhash_sock_bh(sk
);
656 char *dn_addr2asc(__u16 addr
, char *buf
)
658 unsigned short node
, area
;
660 node
= addr
& 0x03ff;
662 sprintf(buf
, "%hd.%hd", area
, node
);
669 static int dn_create(struct net
*net
, struct socket
*sock
, int protocol
,
674 if (protocol
< 0 || protocol
> U8_MAX
)
677 if (!net_eq(net
, &init_net
))
678 return -EAFNOSUPPORT
;
680 switch (sock
->type
) {
682 if (protocol
!= DNPROTO_NSP
)
683 return -EPROTONOSUPPORT
;
688 return -ESOCKTNOSUPPORT
;
692 if ((sk
= dn_alloc_sock(net
, sock
, GFP_KERNEL
, kern
)) == NULL
)
695 sk
->sk_protocol
= protocol
;
702 dn_release(struct socket
*sock
)
704 struct sock
*sk
= sock
->sk
;
718 static int dn_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
720 struct sock
*sk
= sock
->sk
;
721 struct dn_scp
*scp
= DN_SK(sk
);
722 struct sockaddr_dn
*saddr
= (struct sockaddr_dn
*)uaddr
;
723 struct net_device
*dev
, *ldev
;
726 if (addr_len
!= sizeof(struct sockaddr_dn
))
729 if (saddr
->sdn_family
!= AF_DECnet
)
732 if (le16_to_cpu(saddr
->sdn_nodeaddrl
) && (le16_to_cpu(saddr
->sdn_nodeaddrl
) != 2))
735 if (le16_to_cpu(saddr
->sdn_objnamel
) > DN_MAXOBJL
)
738 if (saddr
->sdn_flags
& ~SDF_WILD
)
741 if (!capable(CAP_NET_BIND_SERVICE
) && (saddr
->sdn_objnum
||
742 (saddr
->sdn_flags
& SDF_WILD
)))
745 if (!(saddr
->sdn_flags
& SDF_WILD
)) {
746 if (le16_to_cpu(saddr
->sdn_nodeaddrl
)) {
749 for_each_netdev_rcu(&init_net
, dev
) {
752 if (dn_dev_islocal(dev
, dn_saddr2dn(saddr
))) {
759 return -EADDRNOTAVAIL
;
765 if (sock_flag(sk
, SOCK_ZAPPED
)) {
766 memcpy(&scp
->addr
, saddr
, addr_len
);
767 sock_reset_flag(sk
, SOCK_ZAPPED
);
769 rv
= dn_hash_sock(sk
);
771 sock_set_flag(sk
, SOCK_ZAPPED
);
779 static int dn_auto_bind(struct socket
*sock
)
781 struct sock
*sk
= sock
->sk
;
782 struct dn_scp
*scp
= DN_SK(sk
);
785 sock_reset_flag(sk
, SOCK_ZAPPED
);
787 scp
->addr
.sdn_flags
= 0;
788 scp
->addr
.sdn_objnum
= 0;
791 * This stuff is to keep compatibility with Eduardo's
792 * patch. I hope I can dispense with it shortly...
794 if ((scp
->accessdata
.acc_accl
!= 0) &&
795 (scp
->accessdata
.acc_accl
<= 12)) {
797 scp
->addr
.sdn_objnamel
= cpu_to_le16(scp
->accessdata
.acc_accl
);
798 memcpy(scp
->addr
.sdn_objname
, scp
->accessdata
.acc_acc
, le16_to_cpu(scp
->addr
.sdn_objnamel
));
800 scp
->accessdata
.acc_accl
= 0;
801 memset(scp
->accessdata
.acc_acc
, 0, 40);
803 /* End of compatibility stuff */
805 scp
->addr
.sdn_add
.a_len
= cpu_to_le16(2);
806 rv
= dn_dev_bind_default((__le16
*)scp
->addr
.sdn_add
.a_addr
);
808 rv
= dn_hash_sock(sk
);
810 sock_set_flag(sk
, SOCK_ZAPPED
);
816 static int dn_confirm_accept(struct sock
*sk
, long *timeo
, gfp_t allocation
)
818 struct dn_scp
*scp
= DN_SK(sk
);
822 if (scp
->state
!= DN_CR
)
826 scp
->segsize_loc
= dst_metric_advmss(__sk_dst_get(sk
));
827 dn_send_conn_conf(sk
, allocation
);
829 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
832 if (scp
->state
== DN_CC
)
833 *timeo
= schedule_timeout(*timeo
);
836 if (scp
->state
== DN_RUN
)
838 err
= sock_error(sk
);
841 err
= sock_intr_errno(*timeo
);
842 if (signal_pending(current
))
847 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
849 finish_wait(sk_sleep(sk
), &wait
);
851 sk
->sk_socket
->state
= SS_CONNECTED
;
852 } else if (scp
->state
!= DN_CC
) {
853 sk
->sk_socket
->state
= SS_UNCONNECTED
;
858 static int dn_wait_run(struct sock
*sk
, long *timeo
)
860 struct dn_scp
*scp
= DN_SK(sk
);
864 if (scp
->state
== DN_RUN
)
870 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
873 if (scp
->state
== DN_CI
|| scp
->state
== DN_CC
)
874 *timeo
= schedule_timeout(*timeo
);
877 if (scp
->state
== DN_RUN
)
879 err
= sock_error(sk
);
882 err
= sock_intr_errno(*timeo
);
883 if (signal_pending(current
))
888 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
890 finish_wait(sk_sleep(sk
), &wait
);
893 sk
->sk_socket
->state
= SS_CONNECTED
;
894 } else if (scp
->state
!= DN_CI
&& scp
->state
!= DN_CC
) {
895 sk
->sk_socket
->state
= SS_UNCONNECTED
;
/*
 * Core of connect(2): handle already-connected / in-progress states,
 * validate the destination, auto-bind if needed, build the flow, look
 * up a route, send the connect-init and optionally wait for DN_RUN.
 * Called with the socket locked.
 */
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowidn fld;
	struct dst_entry *dst;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;

	if (addr->sdn_family != AF_DECnet)
		goto out;

	if (addr->sdn_flags & SDF_WILD)
		goto out;

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.daddr = dn_saddr2dn(&scp->peer);
	fld.saddr = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
		goto out;
	dst = __sk_dst_get(sk);
	sk->sk_route_caps = dst->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric_advmss(dst);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}
968 static int dn_connect(struct socket
*sock
, struct sockaddr
*uaddr
, int addrlen
, int flags
)
970 struct sockaddr_dn
*addr
= (struct sockaddr_dn
*)uaddr
;
971 struct sock
*sk
= sock
->sk
;
973 long timeo
= sock_sndtimeo(sk
, flags
& O_NONBLOCK
);
976 err
= __dn_connect(sk
, addr
, addrlen
, &timeo
, 0);
982 static inline int dn_check_state(struct sock
*sk
, struct sockaddr_dn
*addr
, int addrlen
, long *timeo
, int flags
)
984 struct dn_scp
*scp
= DN_SK(sk
);
986 switch (scp
->state
) {
990 return dn_confirm_accept(sk
, timeo
, sk
->sk_allocation
);
993 return dn_wait_run(sk
, timeo
);
995 return __dn_connect(sk
, addr
, addrlen
, timeo
, flags
);
1002 static void dn_access_copy(struct sk_buff
*skb
, struct accessdata_dn
*acc
)
1004 unsigned char *ptr
= skb
->data
;
1006 acc
->acc_userl
= *ptr
++;
1007 memcpy(&acc
->acc_user
, ptr
, acc
->acc_userl
);
1008 ptr
+= acc
->acc_userl
;
1010 acc
->acc_passl
= *ptr
++;
1011 memcpy(&acc
->acc_pass
, ptr
, acc
->acc_passl
);
1012 ptr
+= acc
->acc_passl
;
1014 acc
->acc_accl
= *ptr
++;
1015 memcpy(&acc
->acc_acc
, ptr
, acc
->acc_accl
);
1017 skb_pull(skb
, acc
->acc_accl
+ acc
->acc_passl
+ acc
->acc_userl
+ 3);
1021 static void dn_user_copy(struct sk_buff
*skb
, struct optdata_dn
*opt
)
1023 unsigned char *ptr
= skb
->data
;
1024 u16 len
= *ptr
++; /* yes, it's 8bit on the wire */
1026 BUG_ON(len
> 16); /* we've checked the contents earlier */
1027 opt
->opt_optl
= cpu_to_le16(len
);
1028 opt
->opt_status
= 0;
1029 memcpy(opt
->opt_data
, ptr
, len
);
1030 skb_pull(skb
, len
+ 1);
1033 static struct sk_buff
*dn_wait_for_connect(struct sock
*sk
, long *timeo
)
1036 struct sk_buff
*skb
= NULL
;
1039 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1042 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1044 *timeo
= schedule_timeout(*timeo
);
1045 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1051 if (sk
->sk_state
!= TCP_LISTEN
)
1053 err
= sock_intr_errno(*timeo
);
1054 if (signal_pending(current
))
1059 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1061 finish_wait(sk_sleep(sk
), &wait
);
1063 return skb
== NULL
? ERR_PTR(err
) : skb
;
1066 static int dn_accept(struct socket
*sock
, struct socket
*newsock
, int flags
,
1069 struct sock
*sk
= sock
->sk
, *newsk
;
1070 struct sk_buff
*skb
= NULL
;
1071 struct dn_skb_cb
*cb
;
1072 unsigned char menuver
;
1075 long timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1076 struct dst_entry
*dst
;
1080 if (sk
->sk_state
!= TCP_LISTEN
|| DN_SK(sk
)->state
!= DN_O
) {
1085 skb
= skb_dequeue(&sk
->sk_receive_queue
);
1087 skb
= dn_wait_for_connect(sk
, &timeo
);
1090 return PTR_ERR(skb
);
1094 cb
= DN_SKB_CB(skb
);
1095 sk_acceptq_removed(sk
);
1096 newsk
= dn_alloc_sock(sock_net(sk
), newsock
, sk
->sk_allocation
, kern
);
1097 if (newsk
== NULL
) {
1105 sk_dst_set(newsk
, dst
);
1106 skb_dst_set(skb
, NULL
);
1108 DN_SK(newsk
)->state
= DN_CR
;
1109 DN_SK(newsk
)->addrrem
= cb
->src_port
;
1110 DN_SK(newsk
)->services_rem
= cb
->services
;
1111 DN_SK(newsk
)->info_rem
= cb
->info
;
1112 DN_SK(newsk
)->segsize_rem
= cb
->segsize
;
1113 DN_SK(newsk
)->accept_mode
= DN_SK(sk
)->accept_mode
;
1115 if (DN_SK(newsk
)->segsize_rem
< 230)
1116 DN_SK(newsk
)->segsize_rem
= 230;
1118 if ((DN_SK(newsk
)->services_rem
& NSP_FC_MASK
) == NSP_FC_NONE
)
1119 DN_SK(newsk
)->max_window
= decnet_no_fc_max_cwnd
;
1121 newsk
->sk_state
= TCP_LISTEN
;
1122 memcpy(&(DN_SK(newsk
)->addr
), &(DN_SK(sk
)->addr
), sizeof(struct sockaddr_dn
));
1125 * If we are listening on a wild socket, we don't want
1126 * the newly created socket on the wrong hash queue.
1128 DN_SK(newsk
)->addr
.sdn_flags
&= ~SDF_WILD
;
1130 skb_pull(skb
, dn_username2sockaddr(skb
->data
, skb
->len
, &(DN_SK(newsk
)->addr
), &type
));
1131 skb_pull(skb
, dn_username2sockaddr(skb
->data
, skb
->len
, &(DN_SK(newsk
)->peer
), &type
));
1132 *(__le16
*)(DN_SK(newsk
)->peer
.sdn_add
.a_addr
) = cb
->src
;
1133 *(__le16
*)(DN_SK(newsk
)->addr
.sdn_add
.a_addr
) = cb
->dst
;
1135 menuver
= *skb
->data
;
1138 if (menuver
& DN_MENUVER_ACC
)
1139 dn_access_copy(skb
, &(DN_SK(newsk
)->accessdata
));
1141 if (menuver
& DN_MENUVER_USR
)
1142 dn_user_copy(skb
, &(DN_SK(newsk
)->conndata_in
));
1144 if (menuver
& DN_MENUVER_PRX
)
1145 DN_SK(newsk
)->peer
.sdn_flags
|= SDF_PROXY
;
1147 if (menuver
& DN_MENUVER_UIC
)
1148 DN_SK(newsk
)->peer
.sdn_flags
|= SDF_UICPROXY
;
1152 memcpy(&(DN_SK(newsk
)->conndata_out
), &(DN_SK(sk
)->conndata_out
),
1153 sizeof(struct optdata_dn
));
1154 memcpy(&(DN_SK(newsk
)->discdata_out
), &(DN_SK(sk
)->discdata_out
),
1155 sizeof(struct optdata_dn
));
1158 err
= dn_hash_sock(newsk
);
1160 sock_reset_flag(newsk
, SOCK_ZAPPED
);
1161 dn_send_conn_ack(newsk
);
1164 * Here we use sk->sk_allocation since although the conn conf is
1165 * for the newsk, the context is the old socket.
1167 if (DN_SK(newsk
)->accept_mode
== ACC_IMMED
)
1168 err
= dn_confirm_accept(newsk
, &timeo
,
1171 release_sock(newsk
);
1176 static int dn_getname(struct socket
*sock
, struct sockaddr
*uaddr
,int peer
)
1178 struct sockaddr_dn
*sa
= (struct sockaddr_dn
*)uaddr
;
1179 struct sock
*sk
= sock
->sk
;
1180 struct dn_scp
*scp
= DN_SK(sk
);
1185 if ((sock
->state
!= SS_CONNECTED
&&
1186 sock
->state
!= SS_CONNECTING
) &&
1187 scp
->accept_mode
== ACC_IMMED
) {
1192 memcpy(sa
, &scp
->peer
, sizeof(struct sockaddr_dn
));
1194 memcpy(sa
, &scp
->addr
, sizeof(struct sockaddr_dn
));
1199 return sizeof(struct sockaddr_dn
);
1203 static __poll_t
dn_poll(struct file
*file
, struct socket
*sock
, poll_table
*wait
)
1205 struct sock
*sk
= sock
->sk
;
1206 struct dn_scp
*scp
= DN_SK(sk
);
1207 __poll_t mask
= datagram_poll(file
, sock
, wait
);
1209 if (!skb_queue_empty_lockless(&scp
->other_receive_queue
))
1210 mask
|= EPOLLRDBAND
;
1215 static int dn_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
1217 struct sock
*sk
= sock
->sk
;
1218 struct dn_scp
*scp
= DN_SK(sk
);
1219 int err
= -EOPNOTSUPP
;
1221 struct sk_buff
*skb
;
1228 return dn_dev_ioctl(cmd
, (void __user
*)arg
);
1232 val
= !skb_queue_empty(&scp
->other_receive_queue
);
1233 if (scp
->state
!= DN_RUN
)
1239 amount
= sk
->sk_sndbuf
- sk_wmem_alloc_get(sk
);
1242 err
= put_user(amount
, (int __user
*)arg
);
1247 skb
= skb_peek(&scp
->other_receive_queue
);
1251 skb_queue_walk(&sk
->sk_receive_queue
, skb
)
1255 err
= put_user(amount
, (int __user
*)arg
);
1266 static int dn_listen(struct socket
*sock
, int backlog
)
1268 struct sock
*sk
= sock
->sk
;
1273 if (sock_flag(sk
, SOCK_ZAPPED
))
1276 if ((DN_SK(sk
)->state
!= DN_O
) || (sk
->sk_state
== TCP_LISTEN
))
1279 sk
->sk_max_ack_backlog
= backlog
;
1280 sk
->sk_ack_backlog
= 0;
1281 sk
->sk_state
= TCP_LISTEN
;
1292 static int dn_shutdown(struct socket
*sock
, int how
)
1294 struct sock
*sk
= sock
->sk
;
1295 struct dn_scp
*scp
= DN_SK(sk
);
1296 int err
= -ENOTCONN
;
1300 if (sock
->state
== SS_UNCONNECTED
)
1304 if (sock
->state
== SS_DISCONNECTING
)
1308 if (scp
->state
== DN_O
)
1311 if (how
!= SHUT_RDWR
)
1314 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1315 dn_destroy_sock(sk
);
1324 static int dn_setsockopt(struct socket
*sock
, int level
, int optname
,
1325 sockptr_t optval
, unsigned int optlen
)
1327 struct sock
*sk
= sock
->sk
;
1331 err
= __dn_setsockopt(sock
, level
, optname
, optval
, optlen
, 0);
1333 #ifdef CONFIG_NETFILTER
1334 /* we need to exclude all possible ENOPROTOOPTs except default case */
1335 if (err
== -ENOPROTOOPT
&& optname
!= DSO_LINKINFO
&&
1336 optname
!= DSO_STREAM
&& optname
!= DSO_SEQPACKET
)
1337 err
= nf_setsockopt(sk
, PF_DECnet
, optname
, optval
, optlen
);
1343 static int __dn_setsockopt(struct socket
*sock
, int level
, int optname
,
1344 sockptr_t optval
, unsigned int optlen
, int flags
)
1346 struct sock
*sk
= sock
->sk
;
1347 struct dn_scp
*scp
= DN_SK(sk
);
1350 struct optdata_dn opt
;
1351 struct accessdata_dn acc
;
1355 unsigned char services
;
1360 if (optlen
&& sockptr_is_null(optval
))
1363 if (optlen
> sizeof(u
))
1366 if (copy_from_sockptr(&u
, optval
, optlen
))
1371 if (sock
->state
== SS_CONNECTED
)
1373 if ((scp
->state
!= DN_O
) && (scp
->state
!= DN_CR
))
1376 if (optlen
!= sizeof(struct optdata_dn
))
1379 if (le16_to_cpu(u
.opt
.opt_optl
) > 16)
1382 memcpy(&scp
->conndata_out
, &u
.opt
, optlen
);
1386 if (sock
->state
!= SS_CONNECTED
&&
1387 scp
->accept_mode
== ACC_IMMED
)
1390 if (optlen
!= sizeof(struct optdata_dn
))
1393 if (le16_to_cpu(u
.opt
.opt_optl
) > 16)
1396 memcpy(&scp
->discdata_out
, &u
.opt
, optlen
);
1400 if (sock
->state
== SS_CONNECTED
)
1402 if (scp
->state
!= DN_O
)
1405 if (optlen
!= sizeof(struct accessdata_dn
))
1408 if ((u
.acc
.acc_accl
> DN_MAXACCL
) ||
1409 (u
.acc
.acc_passl
> DN_MAXACCL
) ||
1410 (u
.acc
.acc_userl
> DN_MAXACCL
))
1413 memcpy(&scp
->accessdata
, &u
.acc
, optlen
);
1416 case DSO_ACCEPTMODE
:
1417 if (sock
->state
== SS_CONNECTED
)
1419 if (scp
->state
!= DN_O
)
1422 if (optlen
!= sizeof(int))
1425 if ((u
.mode
!= ACC_IMMED
) && (u
.mode
!= ACC_DEFER
))
1428 scp
->accept_mode
= (unsigned char)u
.mode
;
1432 if (scp
->state
!= DN_CR
)
1434 timeo
= sock_rcvtimeo(sk
, 0);
1435 err
= dn_confirm_accept(sk
, &timeo
, sk
->sk_allocation
);
1439 if (scp
->state
!= DN_CR
)
1443 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1444 dn_nsp_send_disc(sk
, 0x38, 0, sk
->sk_allocation
);
1448 if (optlen
!= sizeof(unsigned long))
1450 if (u
.win
> NSP_MAX_WINDOW
)
1451 u
.win
= NSP_MAX_WINDOW
;
1454 scp
->max_window
= u
.win
;
1455 if (scp
->snd_window
> u
.win
)
1456 scp
->snd_window
= u
.win
;
1460 if (optlen
!= sizeof(int))
1462 if (scp
->nonagle
== TCP_NAGLE_CORK
)
1464 scp
->nonagle
= (u
.val
== 0) ? 0 : TCP_NAGLE_OFF
;
1465 /* if (scp->nonagle == 1) { Push pending frames } */
1469 if (optlen
!= sizeof(int))
1471 if (scp
->nonagle
== TCP_NAGLE_OFF
)
1473 scp
->nonagle
= (u
.val
== 0) ? 0 : TCP_NAGLE_CORK
;
1474 /* if (scp->nonagle == 0) { Push pending frames } */
1478 if (optlen
!= sizeof(unsigned char))
1480 if ((u
.services
& ~NSP_FC_MASK
) != 0x01)
1482 if ((u
.services
& NSP_FC_MASK
) == NSP_FC_MASK
)
1484 scp
->services_loc
= u
.services
;
1488 if (optlen
!= sizeof(unsigned char))
1492 scp
->info_loc
= u
.info
;
1499 return -ENOPROTOOPT
;
1505 static int dn_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1507 struct sock
*sk
= sock
->sk
;
1511 err
= __dn_getsockopt(sock
, level
, optname
, optval
, optlen
, 0);
1513 #ifdef CONFIG_NETFILTER
1514 if (err
== -ENOPROTOOPT
&& optname
!= DSO_STREAM
&&
1515 optname
!= DSO_SEQPACKET
&& optname
!= DSO_CONACCEPT
&&
1516 optname
!= DSO_CONREJECT
) {
1519 if (get_user(len
, optlen
))
1522 err
= nf_getsockopt(sk
, PF_DECnet
, optname
, optval
, &len
);
1524 err
= put_user(len
, optlen
);
/*
 * Per-option getsockopt worker for DECnet sockets.
 *
 * Clamps the user-supplied length to the size of the selected option
 * object, points r_data at the in-kernel copy, then copies it out and
 * writes back the (possibly truncated) length.  Returns 0 on success,
 * -EFAULT on a bad user pointer, -ENOPROTOOPT for unsupported options.
 */
static int __dn_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct linkinfo_dn link;	/* built on the stack for DSO_LINKINFO */
	unsigned int r_len;
	void *r_data = NULL;		/* source for the copy_to_user() below */
	unsigned int val;		/* holds boolean results for NODELAY/CORK */

	if (get_user(r_len, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		/* Optional data received with the connect request */
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->conndata_in;
		break;

	case DSO_DISDATA:
		/* Optional data received with the disconnect */
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->discdata_in;
		break;

	case DSO_CONACCESS:
		/* Access control (username/password) data */
		if (r_len > sizeof(struct accessdata_dn))
			r_len = sizeof(struct accessdata_dn);
		r_data = &scp->accessdata;
		break;

	case DSO_ACCEPTMODE:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->accept_mode;
		break;

	case DSO_LINKINFO:
		/* Synthesize link state from the generic socket state */
		if (r_len > sizeof(struct linkinfo_dn))
			r_len = sizeof(struct linkinfo_dn);

		memset(&link, 0, sizeof(link));

		switch (sock->state) {
		case SS_CONNECTING:
			link.idn_linkstate = LL_CONNECTING;
			break;
		case SS_DISCONNECTING:
			link.idn_linkstate = LL_DISCONNECTING;
			break;
		case SS_CONNECTED:
			link.idn_linkstate = LL_RUNNING;
			break;
		default:
			link.idn_linkstate = LL_INACTIVE;
		}

		link.idn_segsize = scp->segsize_rem;
		r_data = &link;
		break;

	case DSO_MAXWINDOW:
		if (r_len > sizeof(unsigned long))
			r_len = sizeof(unsigned long);
		r_data = &scp->max_window;
		break;

	case DSO_NODELAY:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == TCP_NAGLE_OFF);
		r_data = &val;
		break;

	case DSO_CORK:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == TCP_NAGLE_CORK);
		r_data = &val;
		break;

	case DSO_SERVICES:
		/* Remote end's NSP services byte (flow-control type etc.) */
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->services_rem;
		break;

	case DSO_INFO:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->info_rem;
		break;

	/*
	 * These four are write-only (or handled elsewhere); the caller in
	 * dn_getsockopt() excludes them from the netfilter fallback, so
	 * -ENOPROTOOPT is final for them.
	 */
	case DSO_STREAM:
	case DSO_SEQPACKET:
	case DSO_CONACCEPT:
	case DSO_CONREJECT:
	default:
		return -ENOPROTOOPT;
	}

	if (r_data) {
		if (copy_to_user(optval, r_data, r_len))
			return -EFAULT;
		if (put_user(r_len, optlen))
			return -EFAULT;
	}

	return 0;
}
/*
 * Decide whether a receive can proceed without blocking.
 *
 * @q:      queue being read (normal or other-data queue)
 * @flags:  recvmsg flags (MSG_OOB / MSG_WAITALL are significant)
 * @target: minimum number of bytes required before waking the reader
 *
 * Returns 1 if there is enough data (or a message boundary) to satisfy
 * the read, 0 if the caller should sleep.
 */
static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
{
	struct sk_buff *skb;
	int len = 0;

	/* For out-of-band data any queued skb at all is enough */
	if (flags & MSG_OOB)
		return !skb_queue_empty(q) ? 1 : 0;

	skb_queue_walk(q, skb) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		len += skb->len;

		/* 0x40 in nsp_flags marks end-of-message (EOM) */
		if (cb->nsp_flags & 0x40) {
			/* SOCK_SEQPACKET reads to EOM */
			if (sk->sk_type == SOCK_SEQPACKET)
				return 1;
			/* so does SOCK_STREAM unless WAITALL is specified */
			if (!(flags & MSG_WAITALL))
				return 1;
		}

		/* minimum data length for read exceeded */
		if (len >= target)
			return 1;
	}

	return 0;
}
/*
 * recvmsg() for DECnet sockets.
 *
 * Waits (subject to MSG_DONTWAIT and the socket rcvtimeo) until
 * dn_data_ready() says enough data is queued, then copies skbs to the
 * user buffer.  MSG_OOB switches to the other-data queue; MSG_PEEK
 * leaves data on the queue.  Fully-consumed skbs are unlinked and may
 * release local flow control.  Runs under lock_sock().
 *
 * Returns bytes copied, or a negative errno.
 */
static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		      int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	size_t target = size > 1 ? 1 : 0;	/* default: wake on any byte */
	size_t copied = 0;
	int rv = 0;
	struct sk_buff *skb, *n;
	struct dn_skb_cb *cb = NULL;
	unsigned char eor = 0;		/* last skb seen carried EOM flag */
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	/* Unbound socket: no local address yet */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rv = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		rv = 0;
		goto out;
	}

	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
	if (rv)
		goto out;

	if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
		rv = -EOPNOTSUPP;
		goto out;
	}

	if (flags & MSG_OOB)
		queue = &scp->other_receive_queue;

	if (flags & MSG_WAITALL)
		target = size;

	/*
	 * See if there is data ready to read, sleep if there isn't
	 */
	for (;;) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		if (sk->sk_err)
			goto out;

		/*
		 * Pending out-of-band data interrupts a normal read once,
		 * reporting MSG_OOB to the caller via msg_flags.
		 */
		if (!skb_queue_empty(&scp->other_receive_queue)) {
			if (!(flags & MSG_OOB)) {
				msg->msg_flags |= MSG_OOB;
				if (!scp->other_report) {
					scp->other_report = 1;
					goto out;
				}
			}
		}

		if (scp->state != DN_RUN)
			goto out;

		if (signal_pending(current)) {
			rv = sock_intr_errno(timeo);
			goto out;
		}

		if (dn_data_ready(sk, queue, flags, target))
			break;

		if (flags & MSG_DONTWAIT) {
			rv = -EWOULDBLOCK;
			goto out;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);
	}

	/* Safe walk: we may unlink skbs as they are fully consumed */
	skb_queue_walk_safe(queue, skb, n) {
		unsigned int chunk = skb->len;
		cb = DN_SKB_CB(skb);

		if ((chunk + copied) > size)
			chunk = size - copied;

		if (memcpy_to_msg(msg, skb->data, chunk)) {
			rv = -EFAULT;
			break;
		}
		copied += chunk;

		if (!(flags & MSG_PEEK))
			skb_pull(skb, chunk);

		eor = cb->nsp_flags & 0x40;	/* end-of-message marker */

		if (skb->len == 0) {
			skb_unlink(skb, queue);
			kfree_skb(skb);
			/*
			 * N.B. Don't refer to skb or cb after this point
			 * in loop.
			 */
			/* Emptied a buffer: lift local flow control if set */
			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
				scp->flowloc_sw = DN_SEND;
				dn_nsp_send_link(sk, DN_SEND, 0);
			}
		}

		if (eor) {
			if (sk->sk_type == SOCK_SEQPACKET)
				break;
			if (!(flags & MSG_WAITALL))
				break;
		}

		if (flags & MSG_OOB)
			break;

		if (copied >= target)
			break;
	}

	rv = copied;

	if (eor && (sk->sk_type == SOCK_SEQPACKET))
		msg->msg_flags |= MSG_EOR;

out:
	/* MSG_PEEK must not clear sk_err, so read it non-destructively */
	if (rv == 0)
		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);

	if ((rv >= 0) && msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_dn));
		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
		msg->msg_namelen = sizeof(struct sockaddr_dn);
	}

	release_sock(sk);

	return rv;
}
/*
 * Returns non-zero when the sender must wait before queueing another
 * data segment: either the transmit queue has reached the send window,
 * or (for flow-controlled services) the remote end has no credit left
 * on the relevant channel (other-data for MSG_OOB, normal data
 * otherwise).
 */
static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
{
	unsigned char fctype = scp->services_rem & NSP_FC_MASK;
	if (skb_queue_len(queue) >= scp->snd_window)
		return 1;
	if (fctype != NSP_FC_NONE) {
		if (flags & MSG_OOB) {
			if (scp->flowrem_oth == 0)
				return 1;
		} else {
			if (scp->flowrem_dat == 0)
				return 1;
		}
	}
	return 0;
}
/*
 * The DECnet spec requires that the "routing layer" accepts packets which
 * are at least 230 bytes in size. This excludes any headers which the NSP
 * layer might add, so we always assume that we'll be using the maximal
 * length header on data packets. The variation in length is due to the
 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
 * make much practical difference.
 */
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
{
	/* 230 is the spec-mandated minimum routing-layer packet size */
	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
	if (dev) {
		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
		mtu -= LL_RESERVED_SPACE(dev);
		/* Long routing header costs 21 bytes, short header 6 */
		if (dn_db->use_long)
			mtu -= 21;
		else
			mtu -= 6;
		mtu -= DN_MAX_NSP_DATA_HEADER;
	} else {
		/*
		 * 21 = long header, 16 = guess at MAC header length
		 */
		mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
	}
	/* Never return less than the spec minimum */
	if (mtu > mss)
		mss = mtu;
	return mss;
}
/*
 * Current maximum segment size for this socket: the smaller of the two
 * negotiated segment sizes, further limited by the path MTU when a
 * route is cached.  Out-of-band ("other data") messages are capped at
 * 16 bytes by the protocol.
 */
static inline unsigned int dn_current_mss(struct sock *sk, int flags)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct dn_scp *scp = DN_SK(sk);
	int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);

	/* Other data messages are limited to 16 bytes per packet */
	if (flags & MSG_OOB)
		return 16;

	/* This works out the maximum size of segment we can send out */
	if (dst) {
		u32 mtu = dst_mtu(dst);
		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
	}

	return mss_now;
}
/*
 * N.B. We get the timeout wrong here, but then we always did get it
 * wrong before and this is another step along the road to correcting
 * it. It ought to get updated each time we pass through the routine,
 * but in practise it probably doesn't matter too much for now.
 */
/*
 * Allocate an skb for transmission, pre-stamped with the DECnet routing
 * ethertype and marked outgoing.  On failure *errcode is set by
 * sock_alloc_send_skb() and NULL is returned.
 */
static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
			      unsigned long datalen, int noblock,
			      int *errcode)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
						   noblock, errcode);
	if (skb) {
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->pkt_type = PACKET_OUTGOING;
	}
	return skb;
}
/*
 * sendmsg() for DECnet sockets.
 *
 * Splits the user buffer into MSS-sized NSP data segments, waits for
 * window/flow-control space per segment, and queues each via
 * dn_nsp_queue_xmit().  SOCK_STREAM behaves as if MSG_EOR were always
 * set (and rejects an explicit MSG_EOR); MSG_OOB sends a single
 * "other data" message of at most the OOB MSS.  Runs under
 * lock_sock().  Returns bytes sent, or a negative errno if nothing
 * was sent.
 */
static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	size_t mss;
	struct sk_buff_head *queue = &scp->data_xmit_queue;
	int flags = msg->msg_flags;
	int err = 0;
	size_t sent = 0;
	int addr_len = msg->msg_namelen;
	DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct dn_skb_cb *cb;
	size_t len;
	unsigned char fctype;
	long timeo;

	if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
		return -EINVAL;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	/*
	 * The only difference between stream sockets and sequenced packet
	 * sockets is that the stream sockets always behave as if MSG_EOR
	 * has been set.
	 */
	if (sock->type == SOCK_STREAM) {
		if (flags & MSG_EOR) {
			err = -EINVAL;
			goto out;
		}
		flags |= MSG_EOR;
	}

	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
	if (err)
		goto out_err;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		if (!(flags & MSG_NOSIGNAL))
			send_sig(SIGPIPE, current, 0);
		goto out_err;
	}

	if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
		dst_negative_advice(sk);

	/*
	 * NOTE(review): this assignment of mss is dead — it is
	 * unconditionally overwritten by dn_current_mss() below.
	 */
	mss = scp->segsize_rem;
	fctype = scp->services_rem & NSP_FC_MASK;

	mss = dn_current_mss(sk, flags);

	if (flags & MSG_OOB) {
		queue = &scp->other_xmit_queue;
		/* OOB messages may not be segmented */
		if (size > mss) {
			err = -EMSGSIZE;
			goto out;
		}
	}

	scp->persist_fxn = dn_nsp_xmit_timeout;

	while (sent < size) {
		err = sock_error(sk);
		if (err)
			goto out;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		/*
		 * Calculate size that we wish to send.
		 */
		len = size - sent;

		if (len > mss)
			len = mss;

		/*
		 * Wait for queue size to go down below the window
		 * size.
		 */
		if (dn_queue_too_long(scp, queue, flags)) {
			DEFINE_WAIT_FUNC(wait, woken_wake_function);

			if (flags & MSG_DONTWAIT) {
				err = -EWOULDBLOCK;
				goto out;
			}

			add_wait_queue(sk_sleep(sk), &wait);
			sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
			sk_wait_event(sk, &timeo,
				      !dn_queue_too_long(scp, queue, flags), &wait);
			sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
			remove_wait_queue(sk_sleep(sk), &wait);
			continue;
		}

		/*
		 * Get a suitably sized skb.
		 * 64 is a bit of a hack really, but its larger than any
		 * link-layer headers and has served us well as a good
		 * guess as to their real length.
		 */
		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
					 flags & MSG_DONTWAIT, &err);

		if (err)
			break;

		if (!skb)
			continue;

		cb = DN_SKB_CB(skb);

		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);

		if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
			err = -EFAULT;
			goto out;
		}

		if (flags & MSG_OOB) {
			cb->nsp_flags = 0x30;	/* other-data message */
			if (fctype != NSP_FC_NONE)
				scp->flowrem_oth--;	/* consume OOB credit */
		} else {
			cb->nsp_flags = 0x00;
			if (scp->seg_total == 0)
				cb->nsp_flags |= 0x20;	/* beginning of message */

			scp->seg_total += len;

			if (((sent + len) == size) && (flags & MSG_EOR)) {
				cb->nsp_flags |= 0x40;	/* end of message */
				scp->seg_total = 0;
				if (fctype == NSP_FC_SCMC)
					scp->flowrem_seg--;	/* message credit */
			}
			if (fctype == NSP_FC_SRC)
				scp->flowrem_dat--;	/* segment credit */
		}

		sent += len;
		dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
		skb = NULL;	/* ownership passed to the xmit queue */

		scp->persist = dn_nsp_persist(sk);
	}
out:

	kfree_skb(skb);

	release_sock(sk);

	return sent ? sent : err;

out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
/*
 * Netdevice notifier: bring DECnet up/down on a device as it comes and
 * goes.  Only devices in the initial network namespace are handled.
 * NOTE(review): the switch body was reconstructed from the upstream
 * file; the NETDEV_UP/NETDEV_DOWN cases are not visible in this chunk.
 */
static int dn_device_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		dn_dev_up(dev);
		break;
	case NETDEV_DOWN:
		dn_dev_down(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* Registered with the netdevice notifier chain in decnet_init() */
static struct notifier_block dn_dev_notifier = {
	.notifier_call = dn_device_event,
};
/* Receive hook for DECnet routing-layer (DIX) frames */
static struct packet_type dn_dix_packet_type __read_mostly = {
	.type =		cpu_to_be16(ETH_P_DNA_RT),
	.func =		dn_route_rcv,
};
#ifdef CONFIG_PROC_FS
/* Iterator state for /proc/net/decnet: current hash bucket index */
struct dn_iter_state {
	int bucket;
};
/*
 * Return the first socket in the hash table, scanning buckets in order,
 * or NULL if the table is empty.  Caller holds dn_hash_lock.
 */
static struct sock *dn_socket_get_first(struct seq_file *seq)
{
	struct dn_iter_state *state = seq->private;
	struct sock *n = NULL;

	for (state->bucket = 0;
	     state->bucket < DN_SK_HASH_SIZE;
	     ++state->bucket) {
		n = sk_head(&dn_sk_hash[state->bucket]);
		if (n)
			break;
	}

	return n;
}
/*
 * Return the socket after @n, moving on to subsequent hash buckets when
 * the current chain is exhausted; NULL at end of table.  Caller holds
 * dn_hash_lock.
 */
static struct sock *dn_socket_get_next(struct seq_file *seq,
				       struct sock *n)
{
	struct dn_iter_state *state = seq->private;

	n = sk_next(n);
try_again:
	if (n)
		goto out;
	if (++state->bucket >= DN_SK_HASH_SIZE)
		goto out;
	n = sk_head(&dn_sk_hash[state->bucket]);
	goto try_again;
out:
	return n;
}
/*
 * Advance to the socket at index *pos (counting down to zero), or NULL
 * if the table has fewer entries.  Caller holds dn_hash_lock.
 */
static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct sock *sk = dn_socket_get_first(seq);

	if (sk) {
		while (*pos && (sk = dn_socket_get_next(seq, sk)))
			--*pos;
	}
	return *pos ? NULL : sk;
}
/*
 * Take dn_hash_lock and position at index @pos.  The lock is kept held
 * on success (released later by seq_next/seq_stop) and dropped here on
 * failure.
 */
static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;

	read_lock_bh(&dn_hash_lock);
	rc = socket_get_idx(seq, &pos);
	if (!rc) {
		read_unlock_bh(&dn_hash_lock);
	}
	return rc;
}
/* seq_file start: SEQ_START_TOKEN yields the header line at *pos == 0 */
static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
/*
 * seq_file next: step to the following socket, dropping dn_hash_lock
 * when the iteration runs off the end of the table.
 */
static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = dn_socket_get_idx(seq, 0);
		goto out;
	}

	rc = dn_socket_get_next(seq, v);
	if (rc)
		goto out;
	read_unlock_bh(&dn_hash_lock);
out:
	++*pos;
	return rc;
}
/* seq_file stop: release dn_hash_lock if iteration stopped mid-table */
static void dn_socket_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		read_unlock_bh(&dn_hash_lock);
}
2199 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2201 static void dn_printable_object(struct sockaddr_dn
*dn
, unsigned char *buf
)
2205 switch (le16_to_cpu(dn
->sdn_objnamel
)) {
2207 sprintf(buf
, "%d", dn
->sdn_objnum
);
2210 for (i
= 0; i
< le16_to_cpu(dn
->sdn_objnamel
); i
++) {
2211 buf
[i
] = dn
->sdn_objname
[i
];
2212 if (IS_NOT_PRINTABLE(buf
[i
]))
/*
 * Map an NSP connection state to a short fixed-width label for
 * /proc/net/decnet.  NOTE(review): the switch body is not visible in
 * this chunk; it was reconstructed from the upstream file — verify the
 * exact label strings against the original before relying on them.
 */
static char *dn_state2asc(unsigned char state)
{
	switch (state) {
	case DN_O:
		return "OPEN";
	case DN_CR:
		return "  CR";
	case DN_DR:
		return "  DR";
	case DN_DRC:
		return " DRC";
	case DN_CC:
		return "  CC";
	case DN_CI:
		return "  CI";
	case DN_NR:
		return "  NR";
	case DN_NC:
		return "  NC";
	case DN_CD:
		return "  CD";
	case DN_RJ:
		return "  RJ";
	case DN_RUN:
		return " RUN";
	case DN_DI:
		return "  DI";
	case DN_DIC:
		return " DIC";
	case DN_DN:
		return "  DN";
	case DN_CL:
		return "  CL";
	case DN_CN:
		return "  CN";
	}

	return "????";
}
/*
 * Emit one /proc/net/decnet line for @sk: local then remote address,
 * port, data/other-data sequence and ack counters, software flow-control
 * state, object name, connection state and accept mode.
 * NOTE(review): the middle seq_printf arguments (counters) are not
 * visible in this chunk and were reconstructed from the upstream file.
 */
static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	char buf1[DN_ASCBUF_LEN];
	char buf2[DN_ASCBUF_LEN];
	char local_object[DN_MAXOBJL+3];
	char remote_object[DN_MAXOBJL+3];

	dn_printable_object(&scp->addr, local_object);
	dn_printable_object(&scp->peer, remote_object);

	seq_printf(seq,
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
		   scp->addrloc,
		   scp->numdat,
		   scp->numoth,
		   scp->ackxmt_dat,
		   scp->ackxmt_oth,
		   scp->flowloc_sw,
		   local_object,
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
		   scp->addrrem,
		   scp->numdat_rcv,
		   scp->numoth_rcv,
		   scp->ackrcv_dat,
		   scp->ackrcv_oth,
		   scp->flowrem_sw,
		   remote_object,
		   dn_state2asc(scp->state),
		   ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
}
/*
 * seq_file show: header line for the start token, otherwise one
 * formatted socket entry.  NOTE(review): the exact run of spaces in the
 * header string was lost in extraction; reconstructed to align "Remote"
 * with the remote-address column — verify against the original.
 */
static int dn_socket_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Local                                              Remote\n");
	} else {
		dn_socket_format_entry(seq, v);
	}
	return 0;
}
/* seq_file operations backing /proc/net/decnet */
static const struct seq_operations dn_socket_seq_ops = {
	.start	= dn_socket_seq_start,
	.next	= dn_socket_seq_next,
	.stop	= dn_socket_seq_stop,
	.show	= dn_socket_seq_show,
};
#endif /* CONFIG_PROC_FS */
/* AF_DECnet socket-creation hook registered with sock_register() */
static const struct net_proto_family	dn_family_ops = {
	.family =	AF_DECnet,
	.create =	dn_create,
	.owner	=	THIS_MODULE,
};
/*
 * proto_ops table for DECnet sockets.
 * NOTE(review): the .bind/.poll/.ioctl entries are not visible in this
 * chunk (original lines 2321/2326/2327 were dropped by extraction);
 * reconstructed from the upstream file.
 */
static const struct proto_ops dn_proto_ops = {
	.family =	AF_DECnet,
	.owner =	THIS_MODULE,
	.release =	dn_release,
	.bind =		dn_bind,
	.connect =	dn_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	dn_accept,
	.getname =	dn_getname,
	.poll =		dn_poll,
	.ioctl =	dn_ioctl,
	.listen =	dn_listen,
	.shutdown =	dn_shutdown,
	.setsockopt =	dn_setsockopt,
	.getsockopt =	dn_getsockopt,
	.sendmsg =	dn_sendmsg,
	.recvmsg =	dn_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
/* Module metadata and the banner printed at init time */
MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_DECnet);

static const char banner[] __initconst = KERN_INFO
"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
/*
 * Module init: register the protocol, bring up the DECnet sub-layers,
 * then hook into the socket, packet, notifier, procfs and sysctl
 * infrastructure.  NOTE(review): the sub-layer init calls between
 * proto_register() and sock_register() are not visible in this chunk
 * (original lines 2354-2360); reconstructed from the upstream file.
 */
static int __init decnet_init(void)
{
	int rc;

	printk(banner);

	rc = proto_register(&dn_proto, 1);
	if (rc != 0)
		goto out;

	dn_neigh_init();
	dn_dev_init();
	dn_route_init();
	dn_fib_init();

	sock_register(&dn_family_ops);
	dev_add_pack(&dn_dix_packet_type);
	register_netdevice_notifier(&dn_dev_notifier);

	proc_create_seq_private("decnet", 0444, init_net.proc_net,
			       &dn_socket_seq_ops, sizeof(struct dn_iter_state),
			       NULL);
	dn_register_sysctl();
out:
	return rc;

}
module_init(decnet_init);
/*
 * Prevent DECnet module unloading until its fixed properly.
 * Requires an audit of the code to check for memory leaks and
 * initialisation problems etc.
 */
#if 0
/*
 * Module exit: unwind everything decnet_init() set up, in reverse.
 * Compiled out (see comment above) so the module cannot be unloaded.
 * NOTE(review): the sub-layer cleanup calls (original lines ~2391-2394)
 * are not visible in this chunk; reconstructed from the upstream file.
 */
static void __exit decnet_exit(void)
{
	sock_unregister(AF_DECnet);
	rtnl_unregister_all(PF_DECnet);
	dev_remove_pack(&dn_dix_packet_type);

	dn_unregister_sysctl();

	unregister_netdevice_notifier(&dn_dev_notifier);

	dn_route_cleanup();
	dn_dev_cleanup();
	dn_neigh_cleanup();
	dn_fib_cleanup();

	remove_proc_entry("decnet", init_net.proc_net);

	proto_unregister(&dn_proto);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
module_exit(decnet_exit);
#endif