// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Socket Layer Interface
 *
 * Authors:     Eduardo Marcelo Serrat <emserrat@geocities.com>
 *              Patrick Caulfield <patrick@pandh.demon.co.uk>
 *
 * Changes:
 *        Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
 *                          version of the code. Original copyright preserved
 *                          below.
 *        Steve Whitehouse: Some bug fixes, cleaning up some code to make it
 *                          compatible with my routing layer.
 *        Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
 *                          Caulfield.
 *        Steve Whitehouse: Further bug fixes, checking module code still works
 *                          with new routing layer.
 *        Steve Whitehouse: Additional set/get_sockopt() calls.
 *        Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
 *                          code.
 *        Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
 *                          way. Didn't manage it entirely, but it's better.
 *        Steve Whitehouse: ditto for sendmsg().
 *        Steve Whitehouse: A selection of bug fixes to various things.
 *        Steve Whitehouse: Added TIOCOUTQ ioctl.
 *        Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
 *        Steve Whitehouse: Fixes to connect() error returns.
 *       Patrick Caulfield: Fixes to delayed acceptance logic.
 *         David S. Miller: New socket locking
 *        Steve Whitehouse: Socket list hashing/locking
 *         Arnaldo C. Melo: use capable, not suser
 *        Steve Whitehouse: Removed unused code. Fix to use sk->allocation
 *                          when required.
 *       Patrick Caulfield: /proc/net/decnet now has object name/number
 *        Steve Whitehouse: Fixed local port allocation, hashed sk list
 *          Matthew Wilcox: Fixes for dn_ioctl()
 *        Steve Whitehouse: New connect/accept logic to allow timeouts and
 *                          prepare for sendpage etc.
 */
/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

HISTORY:

Version        Kernel    Date       Author/Comments
-------        ------    ----       ---------------
Version 0.0.1  2.0.30    01-dic-97  Eduardo Marcelo Serrat
                                    (emserrat@geocities.com)

                                    First development of the DECnet Socket
                                    Layer for Linux. Only supports outgoing
                                    connections.

Version 0.0.2  2.1.105   20-jun-98  Patrick J. Caulfield
                                    (patrick@pandh.demon.co.uk)

                                    Port to new kernel development version.

Version 0.0.3  2.1.106   25-jun-98  Eduardo Marcelo Serrat
                                    (emserrat@geocities.com)

                                    Added support for incoming connections
                                    so we can start developing server apps
                                    on Linux.

                                    Module Support

Version 0.0.4  2.1.109   21-jul-98  Eduardo Marcelo Serrat
                                    (emserrat@geocities.com)

                                    Added support for X11R6.4. Now we can
                                    use DECnet transport for X on Linux!!!

Version 0.0.5  2.1.110   01-aug-98  Eduardo Marcelo Serrat
                                    (emserrat@geocities.com)

                                    Removed bugs on flow control
                                    Removed bugs on incoming accessdata
                                    order

Version 0.0.6  2.1.110   07-aug-98  Eduardo Marcelo Serrat
                                    dn_recvmsg fixes

                                    Patrick J. Caulfield
                                    dn_bind fixes
*******************************************************************************/

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/flow.h>
#include <asm/ioctls.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/jiffies.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/fib_rules.h>
#include <net/tcp.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>

struct dn_sock {
	struct sock sk;
	struct dn_scp scp;
};

static void dn_keepalive(struct sock *sk);

#define DN_SK_HASH_SHIFT 8
#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)


static const struct proto_ops dn_proto_ops;
static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
static atomic_long_t decnet_memory_allocated;

static int __dn_setsockopt(struct socket *sock, int level, int optname,
		sockptr_t optval, unsigned int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname,
		char __user *optval, int __user *optlen, int flags);

static struct hlist_head *dn_find_list(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->addr.sdn_flags & SDF_WILD)
		return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;

	return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
}

/*
 * Valid ports are those greater than zero and not already in use.
 */
static int check_port(__le16 port)
{
	struct sock *sk;

	if (port == 0)
		return -1;

	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
		struct dn_scp *scp = DN_SK(sk);

		if (scp->addrloc == port)
			return -1;
	}
	return 0;
}

static unsigned short port_alloc(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	static unsigned short port = 0x2000;
	unsigned short i_port = port;

	while (check_port(cpu_to_le16(++port)) != 0) {
		if (port == i_port)
			return 0;
	}

	scp->addrloc = cpu_to_le16(port);

	return 1;
}

/*
 * Since this is only ever called from user
 * level, we don't need a write_lock() version
 * of this.
 */
static int dn_hash_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct hlist_head *list;
	int rv = -EUSERS;

	BUG_ON(sk_hashed(sk));

	write_lock_bh(&dn_hash_lock);

	if (!scp->addrloc && !port_alloc(sk))
		goto out;

	rv = -EADDRINUSE;
	if ((list = dn_find_list(sk)) == NULL)
		goto out;

	sk_add_node(sk, list);
	rv = 0;
out:
	write_unlock_bh(&dn_hash_lock);
	return rv;
}

static void dn_unhash_sock(struct sock *sk)
{
	write_lock(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock(&dn_hash_lock);
}

static void dn_unhash_sock_bh(struct sock *sk)
{
	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&dn_hash_lock);
}

static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
{
	int i;
	unsigned int hash = addr->sdn_objnum;

	if (hash == 0) {
		hash = addr->sdn_objnamel;
		for (i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
			hash ^= addr->sdn_objname[i];
			hash ^= (hash << 3);
		}
	}

	return &dn_sk_hash[hash & DN_SK_HASH_MASK];
}
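
/*
 * Illustrative note (added comment, not part of the original source):
 * numbered objects hash directly on sdn_objnum.  Named objects seed the
 * hash with the raw sdn_objnamel field and then XOR-fold each byte of
 * the name, i.e. for each byte: hash ^= byte; hash ^= hash << 3; before
 * the final masking with DN_SK_HASH_MASK above.
 */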

/*
 * Called to transform a socket from bound (i.e. with a local address)
 * into a listening socket (doesn't need a local port number) and rehashes
 * based upon the object name/number.
 */
static void dn_rehash_sock(struct sock *sk)
{
	struct hlist_head *list;
	struct dn_scp *scp = DN_SK(sk);

	if (scp->addr.sdn_flags & SDF_WILD)
		return;

	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	DN_SK(sk)->addrloc = 0;
	list = listen_hash(&DN_SK(sk)->addr);
	sk_add_node(sk, list);
	write_unlock_bh(&dn_hash_lock);
}

int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
{
	int len = 2;

	*buf++ = type;

	switch (type) {
	case 0:
		*buf++ = sdn->sdn_objnum;
		break;
	case 1:
		*buf++ = 0;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 3 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	case 2:
		memset(buf, 0, 5);
		buf += 5;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 7 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	}

	return len;
}

/*
 * On reception of usernames, we handle types 1 and 0 for destination
 * addresses only. Types 2 and 4 are used for source addresses, but the
 * UIC, GIC are ignored and they are both treated the same way. Type 3
 * is never used as I've no idea what its purpose might be or what its
 * format is.
 */
int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
{
	unsigned char type;
	int size = len;
	int namel = 12;

	sdn->sdn_objnum = 0;
	sdn->sdn_objnamel = cpu_to_le16(0);
	memset(sdn->sdn_objname, 0, DN_MAXOBJL);

	if (len < 2)
		return -1;

	len -= 2;
	*fmt = *data++;
	type = *data++;

	switch (*fmt) {
	case 0:
		sdn->sdn_objnum = type;
		return 2;
	case 1:
		namel = 16;
		break;
	case 2:
		len -= 4;
		data += 4;
		break;
	case 4:
		len -= 8;
		data += 8;
		break;
	default:
		return -1;
	}

	len -= 1;

	if (len < 0)
		return -1;

	sdn->sdn_objnamel = cpu_to_le16(*data++);
	len -= le16_to_cpu(sdn->sdn_objnamel);

	if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
		return -1;

	memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));

	return size - len;
}
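
/*
 * Worked example (illustrative comment, not part of the original code):
 * the "username" blocks built by dn_sockaddr2username() and parsed by
 * dn_username2sockaddr() are laid out as follows for an object name of
 * length 6, e.g. "MIRROR":
 *
 *   format 0: [0x00][objnum]                           -> 2 bytes
 *   format 1: [0x01][0x00][0x06][6 name bytes]         -> 3 + 6 bytes
 *   format 2: [0x02][5 zero bytes][0x06][6 name bytes] -> 7 + 6 bytes
 *
 * On receive, format 4 is handled like format 2 except that 8 bytes of
 * UIC/GIC information (rather than 4) are skipped after the first two
 * bytes before the name length byte is read.
 */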

struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
{
	struct hlist_head *list = listen_hash(addr);
	struct sock *sk;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, list) {
		struct dn_scp *scp = DN_SK(sk);

		if (sk->sk_state != TCP_LISTEN)
			continue;
		if (scp->addr.sdn_objnum) {
			if (scp->addr.sdn_objnum != addr->sdn_objnum)
				continue;
		} else {
			if (addr->sdn_objnum)
				continue;
			if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
				continue;
			if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
				continue;
		}
		sock_hold(sk);
		read_unlock(&dn_hash_lock);
		return sk;
	}

	sk = sk_head(&dn_wild_sk);
	if (sk) {
		if (sk->sk_state == TCP_LISTEN)
			sock_hold(sk);
		else
			sk = NULL;
	}

	read_unlock(&dn_hash_lock);
	return sk;
}

struct sock *dn_find_by_skb(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk;
	struct dn_scp *scp;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
		scp = DN_SK(sk);
		if (cb->src != dn_saddr2dn(&scp->peer))
			continue;
		if (cb->dst_port != scp->addrloc)
			continue;
		if (scp->addrrem && (cb->src_port != scp->addrrem))
			continue;
		sock_hold(sk);
		goto found;
	}
	sk = NULL;
found:
	read_unlock(&dn_hash_lock);
	return sk;
}

static void dn_destruct(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	skb_queue_purge(&scp->data_xmit_queue);
	skb_queue_purge(&scp->other_xmit_queue);
	skb_queue_purge(&scp->other_receive_queue);

	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}

static unsigned long dn_memory_pressure;

static void dn_enter_memory_pressure(struct sock *sk)
{
	if (!dn_memory_pressure) {
		dn_memory_pressure = 1;
	}
}

static struct proto dn_proto = {
	.name			= "NSP",
	.owner			= THIS_MODULE,
	.enter_memory_pressure	= dn_enter_memory_pressure,
	.memory_pressure	= &dn_memory_pressure,
	.memory_allocated	= &decnet_memory_allocated,
	.sysctl_mem		= sysctl_decnet_mem,
	.sysctl_wmem		= sysctl_decnet_wmem,
	.sysctl_rmem		= sysctl_decnet_rmem,
	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
	.obj_size		= sizeof(struct dn_sock),
};

static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
{
	struct dn_scp *scp;
	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);

	if (!sk)
		goto out;

	if (sock)
		sock->ops = &dn_proto_ops;
	sock_init_data(sock, sk);

	sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
	sk->sk_destruct    = dn_destruct;
	sk->sk_no_check_tx = 1;
	sk->sk_family      = PF_DECnet;
	sk->sk_protocol    = 0;
	sk->sk_allocation  = gfp;
	sk->sk_sndbuf      = sysctl_decnet_wmem[1];
	sk->sk_rcvbuf      = sysctl_decnet_rmem[1];

	/* Initialization of DECnet Session Control Port */
	scp = DN_SK(sk);
	scp->state	= DN_O;		/* Open			*/
	scp->numdat	= 1;		/* Next data seg to tx	*/
	scp->numoth	= 1;		/* Next oth data to tx	*/
	scp->ackxmt_dat = 0;		/* Last data seg ack'ed	*/
	scp->ackxmt_oth = 0;		/* Last oth data ack'ed	*/
	scp->ackrcv_dat = 0;		/* Highest data ack recv*/
	scp->ackrcv_oth = 0;		/* Last oth data ack rec*/
	scp->flowrem_sw = DN_SEND;
	scp->flowloc_sw = DN_SEND;
	scp->flowrem_dat = 0;
	scp->flowrem_oth = 1;
	scp->flowloc_dat = 0;
	scp->flowloc_oth = 1;
	scp->services_rem = 0;
	scp->services_loc = 1 | NSP_FC_NONE;
	scp->info_rem = 0;
	scp->info_loc = 0x03; /* NSP version 4.1 */
	scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
	scp->nonagle = 0;
	scp->multi_ireq = 1;
	scp->accept_mode = ACC_IMMED;
	scp->addr.sdn_family = AF_DECnet;
	scp->peer.sdn_family = AF_DECnet;
	scp->accessdata.acc_accl = 5;
	memcpy(scp->accessdata.acc_acc, "LINUX", 5);

	scp->max_window   = NSP_MAX_WINDOW;
	scp->snd_window   = NSP_MIN_WINDOW;
	scp->nsp_srtt     = NSP_INITIAL_SRTT;
	scp->nsp_rttvar   = NSP_INITIAL_RTTVAR;
	scp->nsp_rxtshift = 0;

	skb_queue_head_init(&scp->data_xmit_queue);
	skb_queue_head_init(&scp->other_xmit_queue);
	skb_queue_head_init(&scp->other_receive_queue);

	scp->persist = 0;
	scp->persist_fxn = NULL;
	scp->keepalive = 10 * HZ;
	scp->keepalive_fxn = dn_keepalive;

	dn_start_slow_timer(sk);
out:
	return sk;
}

/*
 * Keepalive timer.
 * FIXME: Should respond to SO_KEEPALIVE etc.
 */
static void dn_keepalive(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	/*
	 * By checking the other_data transmit queue is empty
	 * we are double checking that we are not sending too
	 * many of these keepalive frames.
	 */
	if (skb_queue_empty(&scp->other_xmit_queue))
		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
}

/*
 * Timer for shutdown/destroyed sockets.
 * When socket is dead & no packets have been sent for a
 * certain amount of time, they are removed by this
 * routine. Also takes care of sending out DI & DC
 * frames at correct times.
 */
int dn_destroy_timer(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->persist = dn_nsp_persist(sk);

	switch (scp->state) {
	case DN_DI:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_di_count)
			scp->state = DN_CN;
		return 0;

	case DN_DR:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_dr_count)
			scp->state = DN_DRC;
		return 0;

	case DN_DN:
		if (scp->nsp_rxtshift < decnet_dn_count) {
			/* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
					 GFP_ATOMIC);
			return 0;
		}
	}

	scp->persist = (HZ * decnet_time_wait);

	if (sk->sk_socket)
		return 0;

	if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
		dn_unhash_sock(sk);
		sock_put(sk);
		return 1;
	}

	return 0;
}

static void dn_destroy_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->nsp_rxtshift = 0; /* reset back off */

	if (sk->sk_socket) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
	}

	sk->sk_state = TCP_CLOSE;

	switch (scp->state) {
	case DN_DN:
		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
				 sk->sk_allocation);
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	case DN_CR:
		scp->state = DN_DR;
		goto disc_reject;
	case DN_RUN:
		scp->state = DN_DI;
		fallthrough;
	case DN_DI:
	case DN_DR:
disc_reject:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
		fallthrough;
	case DN_NC:
	case DN_NR:
	case DN_RJ:
	case DN_DIC:
	case DN_CN:
	case DN_DRC:
	case DN_CI:
	case DN_CD:
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
		fallthrough;
	case DN_O:
		dn_stop_slow_timer(sk);

		dn_unhash_sock_bh(sk);
		sock_put(sk);

		break;
	}
}

char *dn_addr2asc(__u16 addr, char *buf)
{
	unsigned short node, area;

	node = addr & 0x03ff;
	area = addr >> 10;
	sprintf(buf, "%hd.%hd", area, node);

	return buf;
}
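
/*
 * Illustrative example (added comment, not in the original source): a
 * DECnet node address packs a 6 bit area and a 10 bit node number into
 * 16 bits, so 0x0402 (area 1, node 2) is printed as "1.2" and 0x1403
 * (area 5, node 3) as "5.3".
 */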

static int dn_create(struct net *net, struct socket *sock, int protocol,
		     int kern)
{
	struct sock *sk;

	if (protocol < 0 || protocol > U8_MAX)
		return -EINVAL;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	switch (sock->type) {
	case SOCK_SEQPACKET:
		if (protocol != DNPROTO_NSP)
			return -EPROTONOSUPPORT;
		break;
	case SOCK_STREAM:
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
		return -ENOBUFS;

	sk->sk_protocol = protocol;

	return 0;
}
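
/*
 * Usage sketch (illustrative comment only, not part of the original
 * file): a minimal userspace client of this family might look roughly
 * like the fragment below.  It assumes the uapi definition of
 * struct sockaddr_dn (with the field names used throughout this file)
 * is available to userspace, e.g. via <linux/dn.h>, and it stores the
 * little endian on-the-wire fields directly, which is only correct on a
 * little endian host.  Error handling is omitted.
 *
 *	int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *	struct sockaddr_dn remote;
 *
 *	memset(&remote, 0, sizeof(remote));
 *	remote.sdn_family = AF_DECnet;
 *	remote.sdn_add.a_len = 2;                 (node address is 16 bits)
 *	remote.sdn_add.a_addr[0] = 0x02;          (area 1, node 2 = 0x0402)
 *	remote.sdn_add.a_addr[1] = 0x04;
 *	remote.sdn_objnamel = 6;                  (connect by object name)
 *	memcpy(remote.sdn_objname, "MIRROR", 6);
 *	connect(s, (struct sockaddr *)&remote, sizeof(remote));
 *
 * On a SOCK_SEQPACKET socket each send with MSG_EOR set ends a record;
 * SOCK_STREAM sockets behave as if MSG_EOR were always set (see
 * dn_sendmsg() below).
 */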

static int
dn_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock_orphan(sk);
		sock_hold(sk);
		lock_sock(sk);
		dn_destroy_sock(sk);
		release_sock(sk);
		sock_put(sk);
	}

	return 0;
}

static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
	struct net_device *dev, *ldev;
	int rv;

	if (addr_len != sizeof(struct sockaddr_dn))
		return -EINVAL;

	if (saddr->sdn_family != AF_DECnet)
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
		return -EINVAL;

	if (saddr->sdn_flags & ~SDF_WILD)
		return -EINVAL;

	if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
	    (saddr->sdn_flags & SDF_WILD)))
		return -EACCES;

	if (!(saddr->sdn_flags & SDF_WILD)) {
		if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
			rcu_read_lock();
			ldev = NULL;
			for_each_netdev_rcu(&init_net, dev) {
				if (!dev->dn_ptr)
					continue;
				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
					ldev = dev;
					break;
				}
			}
			rcu_read_unlock();
			if (ldev == NULL)
				return -EADDRNOTAVAIL;
		}
	}

	rv = -EINVAL;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED)) {
		memcpy(&scp->addr, saddr, addr_len);
		sock_reset_flag(sk, SOCK_ZAPPED);

		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}
	release_sock(sk);

	return rv;
}

static int dn_auto_bind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int rv;

	sock_reset_flag(sk, SOCK_ZAPPED);

	scp->addr.sdn_flags  = 0;
	scp->addr.sdn_objnum = 0;

	/*
	 * This stuff is to keep compatibility with Eduardo's
	 * patch. I hope I can dispense with it shortly...
	 */
	if ((scp->accessdata.acc_accl != 0) &&
	    (scp->accessdata.acc_accl <= 12)) {

		scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
		memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));

		scp->accessdata.acc_accl = 0;
		memset(scp->accessdata.acc_acc, 0, 40);
	}
	/* End of compatibility stuff */

	scp->addr.sdn_add.a_len = cpu_to_le16(2);
	rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
	if (rv == 0) {
		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}

	return rv;
}

static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err;

	if (scp->state != DN_CR)
		return -EINVAL;

	scp->state = DN_CC;
	scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
	dn_send_conn_conf(sk, allocation);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		if (scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}

static int dn_wait_run(struct sock *sk, long *timeo)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err = 0;

	if (scp->state == DN_RUN)
		goto out;

	if (!*timeo)
		return -EALREADY;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		if (scp->state == DN_CI || scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -ETIMEDOUT;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
out:
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CI && scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}

static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowidn fld;
	struct dst_entry *dst;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;
	if (addr->sdn_family != AF_DECnet)
		goto out;
	if (addr->sdn_flags & SDF_WILD)
		goto out;

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.daddr = dn_saddr2dn(&scp->peer);
	fld.saddr = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
		goto out;
	dst = __sk_dst_get(sk);
	sk->sk_route_caps = dst->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric_advmss(dst);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}

static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
{
	struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	int err;
	long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	lock_sock(sk);
	err = __dn_connect(sk, addr, addrlen, &timeo, 0);
	release_sock(sk);

	return err;
}

static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct dn_scp *scp = DN_SK(sk);

	switch (scp->state) {
	case DN_RUN:
		return 0;
	case DN_CR:
		return dn_confirm_accept(sk, timeo, sk->sk_allocation);
	case DN_CI:
	case DN_CC:
		return dn_wait_run(sk, timeo);
	case DN_O:
		return __dn_connect(sk, addr, addrlen, timeo, flags);
	}

	return -EINVAL;
}

static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
{
	unsigned char *ptr = skb->data;

	acc->acc_userl = *ptr++;
	memcpy(&acc->acc_user, ptr, acc->acc_userl);
	ptr += acc->acc_userl;

	acc->acc_passl = *ptr++;
	memcpy(&acc->acc_pass, ptr, acc->acc_passl);
	ptr += acc->acc_passl;

	acc->acc_accl = *ptr++;
	memcpy(&acc->acc_acc, ptr, acc->acc_accl);

	skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
}

static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
{
	unsigned char *ptr = skb->data;
	u16 len = *ptr++; /* yes, it's 8bit on the wire */

	BUG_ON(len > 16); /* we've checked the contents earlier */
	opt->opt_optl   = cpu_to_le16(len);
	opt->opt_status = 0;
	memcpy(opt->opt_data, ptr, len);
	skb_pull(skb, len + 1);
}

static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb = NULL;
	int err = 0;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for (;;) {
		release_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			*timeo = schedule_timeout(*timeo);
			skb = skb_dequeue(&sk->sk_receive_queue);
		}
		lock_sock(sk);
		if (skb != NULL)
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);

	return skb == NULL ? ERR_PTR(err) : skb;
}

static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
		     bool kern)
{
	struct sock *sk = sock->sk, *newsk;
	struct sk_buff *skb = NULL;
	struct dn_skb_cb *cb;
	unsigned char menuver;
	int err = 0;
	unsigned char type;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	struct dst_entry *dst;

	lock_sock(sk);

	if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
		release_sock(sk);
		return -EINVAL;
	}

	skb = skb_dequeue(&sk->sk_receive_queue);
	if (skb == NULL) {
		skb = dn_wait_for_connect(sk, &timeo);
		if (IS_ERR(skb)) {
			release_sock(sk);
			return PTR_ERR(skb);
		}
	}

	cb = DN_SKB_CB(skb);
	sk_acceptq_removed(sk);
	newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
	if (newsk == NULL) {
		release_sock(sk);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	release_sock(sk);

	dst = skb_dst(skb);
	sk_dst_set(newsk, dst);
	skb_dst_set(skb, NULL);

	DN_SK(newsk)->state        = DN_CR;
	DN_SK(newsk)->addrrem      = cb->src_port;
	DN_SK(newsk)->services_rem = cb->services;
	DN_SK(newsk)->info_rem     = cb->info;
	DN_SK(newsk)->segsize_rem  = cb->segsize;
	DN_SK(newsk)->accept_mode  = DN_SK(sk)->accept_mode;

	if (DN_SK(newsk)->segsize_rem < 230)
		DN_SK(newsk)->segsize_rem = 230;

	if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
		DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;

	newsk->sk_state = TCP_LISTEN;
	memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));

	/*
	 * If we are listening on a wild socket, we don't want
	 * the newly created socket on the wrong hash queue.
	 */
	DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;

	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
	*(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
	*(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;

	menuver = *skb->data;
	skb_pull(skb, 1);

	if (menuver & DN_MENUVER_ACC)
		dn_access_copy(skb, &(DN_SK(newsk)->accessdata));

	if (menuver & DN_MENUVER_USR)
		dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));

	if (menuver & DN_MENUVER_PRX)
		DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;

	if (menuver & DN_MENUVER_UIC)
		DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;

	kfree_skb(skb);

	memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
	       sizeof(struct optdata_dn));
	memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
	       sizeof(struct optdata_dn));

	lock_sock(newsk);
	err = dn_hash_sock(newsk);
	if (err == 0) {
		sock_reset_flag(newsk, SOCK_ZAPPED);
		dn_send_conn_ack(newsk);

		/*
		 * Here we use sk->sk_allocation since although the conn conf is
		 * for the newsk, the context is the old socket.
		 */
		if (DN_SK(newsk)->accept_mode == ACC_IMMED)
			err = dn_confirm_accept(newsk, &timeo,
						sk->sk_allocation);
	}
	release_sock(newsk);
	return err;
}

static int dn_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);

	lock_sock(sk);

	if (peer) {
		if ((sock->state != SS_CONNECTED &&
		     sock->state != SS_CONNECTING) &&
		    scp->accept_mode == ACC_IMMED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
	} else {
		memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
	}

	release_sock(sk);

	return sizeof(struct sockaddr_dn);
}

static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	if (!skb_queue_empty_lockless(&scp->other_receive_queue))
		mask |= EPOLLRDBAND;

	return mask;
}

static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EOPNOTSUPP;
	long amount = 0;
	struct sk_buff *skb;
	int val;

	switch (cmd) {
	case SIOCGIFADDR:
	case SIOCSIFADDR:
		return dn_dev_ioctl(cmd, (void __user *)arg);

	case SIOCATMARK:
		lock_sock(sk);
		val = !skb_queue_empty(&scp->other_receive_queue);
		if (scp->state != DN_RUN)
			val = -ENOTCONN;
		release_sock(sk);
		return val;

	case TIOCOUTQ:
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *)arg);
		break;

	case TIOCINQ:
		lock_sock(sk);
		skb = skb_peek(&scp->other_receive_queue);
		if (skb) {
			amount = skb->len;
		} else {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		}
		release_sock(sk);
		err = put_user(amount, (int __user *)arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}

static int dn_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
		goto out;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog     = 0;
	sk->sk_state           = TCP_LISTEN;
	err                    = 0;
	dn_rehash_sock(sk);

out:
	release_sock(sk);

	return err;
}

static int dn_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -ENOTCONN;

	lock_sock(sk);

	if (sock->state == SS_UNCONNECTED)
		goto out;

	err = 0;
	if (sock->state == SS_DISCONNECTING)
		goto out;

	err = -EINVAL;
	if (scp->state == DN_O)
		goto out;

	if (how != SHUT_RDWR)
		goto out;

	sk->sk_shutdown = SHUTDOWN_MASK;
	dn_destroy_sock(sk);
	err = 0;

out:
	release_sock(sk);

	return err;
}

static int dn_setsockopt(struct socket *sock, int level, int optname,
		sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
	    optname != DSO_STREAM && optname != DSO_SEQPACKET)
		err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
#endif

	return err;
}

static int __dn_setsockopt(struct socket *sock, int level, int optname,
		sockptr_t optval, unsigned int optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	long timeo;
	union {
		struct optdata_dn opt;
		struct accessdata_dn acc;
		int mode;
		unsigned long win;
		int val;
		unsigned char services;
		unsigned char info;
	} u;
	int err;

	if (optlen && sockptr_is_null(optval))
		return -EINVAL;

	if (optlen > sizeof(u))
		return -EINVAL;

	if (copy_from_sockptr(&u, optval, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if ((scp->state != DN_O) && (scp->state != DN_CR))
			return -EINVAL;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->conndata_out, &u.opt, optlen);
		break;

	case DSO_DISDATA:
		if (sock->state != SS_CONNECTED &&
		    scp->accept_mode == ACC_IMMED)
			return -ENOTCONN;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->discdata_out, &u.opt, optlen);
		break;

	case DSO_CONACCESS:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(struct accessdata_dn))
			return -EINVAL;

		if ((u.acc.acc_accl > DN_MAXACCL) ||
		    (u.acc.acc_passl > DN_MAXACCL) ||
		    (u.acc.acc_userl > DN_MAXACCL))
			return -EINVAL;

		memcpy(&scp->accessdata, &u.acc, optlen);
		break;

	case DSO_ACCEPTMODE:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(int))
			return -EINVAL;

		if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
			return -EINVAL;

		scp->accept_mode = (unsigned char)u.mode;
		break;

	case DSO_CONACCEPT:
		if (scp->state != DN_CR)
			return -EINVAL;
		timeo = sock_rcvtimeo(sk, 0);
		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
		return err;

	case DSO_CONREJECT:
		if (scp->state != DN_CR)
			return -EINVAL;

		scp->state = DN_DR;
		sk->sk_shutdown = SHUTDOWN_MASK;
		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
		break;

	case DSO_MAXWINDOW:
		if (optlen != sizeof(unsigned long))
			return -EINVAL;
		if (u.win > NSP_MAX_WINDOW)
			u.win = NSP_MAX_WINDOW;
		if (u.win == 0)
			return -EINVAL;
		scp->max_window = u.win;
		if (scp->snd_window > u.win)
			scp->snd_window = u.win;
		break;

	case DSO_NODELAY:
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == TCP_NAGLE_CORK)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
		/* if (scp->nonagle == 1) { Push pending frames } */
		break;

	case DSO_CORK:
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == TCP_NAGLE_OFF)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
		/* if (scp->nonagle == 0) { Push pending frames } */
		break;

	case DSO_SERVICES:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		if ((u.services & ~NSP_FC_MASK) != 0x01)
			return -EINVAL;
		if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
			return -EINVAL;
		scp->services_loc = u.services;
		break;

	case DSO_INFO:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		if (u.info & 0xfc)
			return -EINVAL;
		scp->info_loc = u.info;
		break;

	case DSO_LINKINFO:
	case DSO_STREAM:
	case DSO_SEQPACKET:
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);
#ifdef CONFIG_NETFILTER
	if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
	    optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
	    optname != DSO_CONREJECT) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
		if (err >= 0)
			err = put_user(len, optlen);
	}
#endif

	return err;
}

static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct linkinfo_dn link;
	unsigned int r_len;
	void *r_data = NULL;
	unsigned int val;

	if (get_user(r_len, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->conndata_in;
		break;

	case DSO_DISDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->discdata_in;
		break;

	case DSO_CONACCESS:
		if (r_len > sizeof(struct accessdata_dn))
			r_len = sizeof(struct accessdata_dn);
		r_data = &scp->accessdata;
		break;

	case DSO_ACCEPTMODE:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->accept_mode;
		break;

	case DSO_LINKINFO:
		if (r_len > sizeof(struct linkinfo_dn))
			r_len = sizeof(struct linkinfo_dn);

		memset(&link, 0, sizeof(link));

		switch (sock->state) {
		case SS_CONNECTING:
			link.idn_linkstate = LL_CONNECTING;
			break;
		case SS_DISCONNECTING:
			link.idn_linkstate = LL_DISCONNECTING;
			break;
		case SS_CONNECTED:
			link.idn_linkstate = LL_RUNNING;
			break;
		default:
			link.idn_linkstate = LL_INACTIVE;
		}

		link.idn_segsize = scp->segsize_rem;
		r_data = &link;
		break;

	case DSO_MAXWINDOW:
		if (r_len > sizeof(unsigned long))
			r_len = sizeof(unsigned long);
		r_data = &scp->max_window;
		break;

	case DSO_NODELAY:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == TCP_NAGLE_OFF);
		r_data = &val;
		break;

	case DSO_CORK:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == TCP_NAGLE_CORK);
		r_data = &val;
		break;

	case DSO_SERVICES:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->services_rem;
		break;

	case DSO_INFO:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->info_rem;
		break;

	case DSO_STREAM:
	case DSO_SEQPACKET:
	case DSO_CONACCEPT:
	case DSO_CONREJECT:
	default:
		return -ENOPROTOOPT;
	}

	if (r_data) {
		if (copy_to_user(optval, r_data, r_len))
			return -EFAULT;
		if (put_user(r_len, optlen))
			return -EFAULT;
	}

	return 0;
}

static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
{
	struct sk_buff *skb;
	int len = 0;

	if (flags & MSG_OOB)
		return !skb_queue_empty(q) ? 1 : 0;

	skb_queue_walk(q, skb) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		len += skb->len;

		if (cb->nsp_flags & 0x40) {
			/* SOCK_SEQPACKET reads to EOM */
			if (sk->sk_type == SOCK_SEQPACKET)
				return 1;
			/* so does SOCK_STREAM unless WAITALL is specified */
			if (!(flags & MSG_WAITALL))
				return 1;
		}

		/* minimum data length for read exceeded */
		if (len >= target)
			return 1;
	}

	return 0;
}

static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		      int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	size_t target = size > 1 ? 1 : 0;
	size_t copied = 0;
	int rv = 0;
	struct sk_buff *skb, *n;
	struct dn_skb_cb *cb = NULL;
	unsigned char eor = 0;
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		rv = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		rv = 0;
		goto out;
	}

	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
	if (rv)
		goto out;

	if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
		rv = -EOPNOTSUPP;
		goto out;
	}

	if (flags & MSG_OOB)
		queue = &scp->other_receive_queue;

	if (flags & MSG_WAITALL)
		target = size;


	/*
	 * See if there is data ready to read, sleep if there isn't
	 */
	for (;;) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		if (sk->sk_err)
			goto out;

		if (!skb_queue_empty(&scp->other_receive_queue)) {
			if (!(flags & MSG_OOB)) {
				msg->msg_flags |= MSG_OOB;
				if (!scp->other_report) {
					scp->other_report = 1;
					goto out;
				}
			}
		}

		if (scp->state != DN_RUN)
			goto out;

		if (signal_pending(current)) {
			rv = sock_intr_errno(timeo);
			goto out;
		}

		if (dn_data_ready(sk, queue, flags, target))
			break;

		if (flags & MSG_DONTWAIT) {
			rv = -EWOULDBLOCK;
			goto out;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);
	}

	skb_queue_walk_safe(queue, skb, n) {
		unsigned int chunk = skb->len;
		cb = DN_SKB_CB(skb);

		if ((chunk + copied) > size)
			chunk = size - copied;

		if (memcpy_to_msg(msg, skb->data, chunk)) {
			rv = -EFAULT;
			break;
		}
		copied += chunk;

		if (!(flags & MSG_PEEK))
			skb_pull(skb, chunk);

		eor = cb->nsp_flags & 0x40;

		if (skb->len == 0) {
			skb_unlink(skb, queue);
			kfree_skb(skb);
			/*
			 * N.B. Don't refer to skb or cb after this point
			 * in loop.
			 */
			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
				scp->flowloc_sw = DN_SEND;
				dn_nsp_send_link(sk, DN_SEND, 0);
			}
		}

		if (eor) {
			if (sk->sk_type == SOCK_SEQPACKET)
				break;
			if (!(flags & MSG_WAITALL))
				break;
		}

		if (flags & MSG_OOB)
			break;

		if (copied >= target)
			break;
	}

	rv = copied;


	if (eor && (sk->sk_type == SOCK_SEQPACKET))
		msg->msg_flags |= MSG_EOR;

out:
	if (rv == 0)
		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);

	if ((rv >= 0) && msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_dn));
		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
		msg->msg_namelen = sizeof(struct sockaddr_dn);
	}

	release_sock(sk);

	return rv;
}

static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
{
	unsigned char fctype = scp->services_rem & NSP_FC_MASK;

	if (skb_queue_len(queue) >= scp->snd_window)
		return 1;
	if (fctype != NSP_FC_NONE) {
		if (flags & MSG_OOB) {
			if (scp->flowrem_oth == 0)
				return 1;
		} else {
			if (scp->flowrem_dat == 0)
				return 1;
		}
	}
	return 0;
}

/*
 * The DECnet spec requires that the "routing layer" accepts packets which
 * are at least 230 bytes in size. This excludes any headers which the NSP
 * layer might add, so we always assume that we'll be using the maximal
 * length header on data packets. The variation in length is due to the
 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
 * make much practical difference.
 */
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
{
	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;

	if (dev) {
		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
		mtu -= LL_RESERVED_SPACE(dev);
		if (dn_db->use_long)
			mtu -= 21;
		else
			mtu -= 6;
		mtu -= DN_MAX_NSP_DATA_HEADER;
	} else {
		/*
		 * 21 = long header, 16 = guess at MAC header length
		 */
		mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
	}
	if (mtu > mss)
		mss = mtu;
	return mss;
}
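
/*
 * Worked example (illustrative comment, not in the original source): for
 * a device using the long routing header the usable segment size works
 * out as
 *
 *	mss = mtu - LL_RESERVED_SPACE(dev) - 21 - DN_MAX_NSP_DATA_HEADER
 *
 * (6 bytes instead of 21 for the short header), and the result is never
 * allowed to drop below the 230 - DN_MAX_NSP_DATA_HEADER floor implied
 * by the spec as described above.
 */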

static inline unsigned int dn_current_mss(struct sock *sk, int flags)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct dn_scp *scp = DN_SK(sk);
	int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);

	/* Other data messages are limited to 16 bytes per packet */
	if (flags & MSG_OOB)
		return 16;

	/* This works out the maximum size of segment we can send out */
	if (dst) {
		u32 mtu = dst_mtu(dst);
		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
	}

	return mss_now;
}

/*
 * N.B. We get the timeout wrong here, but then we always did get it
 * wrong before and this is another step along the road to correcting
 * it. It ought to get updated each time we pass through the routine,
 * but in practice it probably doesn't matter too much for now.
 */
static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
					unsigned long datalen, int noblock,
					int *errcode)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
						  noblock, errcode);
	if (skb) {
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->pkt_type = PACKET_OUTGOING;
	}
	return skb;
}

static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	size_t mss;
	struct sk_buff_head *queue = &scp->data_xmit_queue;
	int flags = msg->msg_flags;
	int err = 0;
	size_t sent = 0;
	int addr_len = msg->msg_namelen;
	DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct dn_skb_cb *cb;
	size_t len;
	unsigned char fctype;
	long timeo;

	if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
		return -EINVAL;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	/*
	 * The only difference between stream sockets and sequenced packet
	 * sockets is that the stream sockets always behave as if MSG_EOR
	 * has been set.
	 */
	if (sock->type == SOCK_STREAM) {
		if (flags & MSG_EOR) {
			err = -EINVAL;
			goto out;
		}
		flags |= MSG_EOR;
	}


	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
	if (err)
		goto out_err;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		if (!(flags & MSG_NOSIGNAL))
			send_sig(SIGPIPE, current, 0);
		goto out_err;
	}

	if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
		dst_negative_advice(sk);

	mss = scp->segsize_rem;
	fctype = scp->services_rem & NSP_FC_MASK;

	mss = dn_current_mss(sk, flags);

	if (flags & MSG_OOB) {
		queue = &scp->other_xmit_queue;
		if (size > mss) {
			err = -EMSGSIZE;
			goto out;
		}
	}

	scp->persist_fxn = dn_nsp_xmit_timeout;

	while (sent < size) {
		err = sock_error(sk);
		if (err)
			goto out;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		/*
		 * Calculate size that we wish to send.
		 */
		len = size - sent;

		if (len > mss)
			len = mss;

		/*
		 * Wait for queue size to go down below the window
		 * size.
		 */
		if (dn_queue_too_long(scp, queue, flags)) {
			DEFINE_WAIT_FUNC(wait, woken_wake_function);

			if (flags & MSG_DONTWAIT) {
				err = -EWOULDBLOCK;
				goto out;
			}

			add_wait_queue(sk_sleep(sk), &wait);
			sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
			sk_wait_event(sk, &timeo,
				      !dn_queue_too_long(scp, queue, flags), &wait);
			sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
			remove_wait_queue(sk_sleep(sk), &wait);
			continue;
		}

		/*
		 * Get a suitably sized skb.
		 * 64 is a bit of a hack really, but it's larger than any
		 * link-layer headers and has served us well as a good
		 * guess as to their real length.
		 */
		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
					 flags & MSG_DONTWAIT, &err);

		if (err)
			break;

		if (!skb)
			continue;

		cb = DN_SKB_CB(skb);

		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);

		if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
			err = -EFAULT;
			goto out;
		}

		if (flags & MSG_OOB) {
			cb->nsp_flags = 0x30;
			if (fctype != NSP_FC_NONE)
				scp->flowrem_oth--;
		} else {
			cb->nsp_flags = 0x00;
			if (scp->seg_total == 0)
				cb->nsp_flags |= 0x20;

			scp->seg_total += len;

			if (((sent + len) == size) && (flags & MSG_EOR)) {
				cb->nsp_flags |= 0x40;
				scp->seg_total = 0;
				if (fctype == NSP_FC_SCMC)
					scp->flowrem_dat--;
			}
			if (fctype == NSP_FC_SRC)
				scp->flowrem_dat--;
		}

		sent += len;
		dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
		skb = NULL;

		scp->persist = dn_nsp_persist(sk);
	}
out:

	kfree_skb(skb);

	release_sock(sk);

	return sent ? sent : err;

out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}

static int dn_device_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		dn_dev_up(dev);
		break;
	case NETDEV_DOWN:
		dn_dev_down(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dn_dev_notifier = {
	.notifier_call = dn_device_event,
};

static struct packet_type dn_dix_packet_type __read_mostly = {
	.type =		cpu_to_be16(ETH_P_DNA_RT),
	.func =		dn_route_rcv,
};

#ifdef CONFIG_PROC_FS
struct dn_iter_state {
	int bucket;
};

static struct sock *dn_socket_get_first(struct seq_file *seq)
{
	struct dn_iter_state *state = seq->private;
	struct sock *n = NULL;

	for (state->bucket = 0;
	     state->bucket < DN_SK_HASH_SIZE;
	     ++state->bucket) {
		n = sk_head(&dn_sk_hash[state->bucket]);
		if (n)
			break;
	}

	return n;
}

static struct sock *dn_socket_get_next(struct seq_file *seq,
				       struct sock *n)
{
	struct dn_iter_state *state = seq->private;

	n = sk_next(n);
	while (!n) {
		if (++state->bucket >= DN_SK_HASH_SIZE)
			break;
		n = sk_head(&dn_sk_hash[state->bucket]);
	}
	return n;
}

static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct sock *sk = dn_socket_get_first(seq);

	if (sk) {
		while (*pos && (sk = dn_socket_get_next(seq, sk)))
			--*pos;
	}
	return *pos ? NULL : sk;
}

static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;

	read_lock_bh(&dn_hash_lock);
	rc = socket_get_idx(seq, &pos);
	if (!rc) {
		read_unlock_bh(&dn_hash_lock);
	}
	return rc;
}

static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = dn_socket_get_idx(seq, 0);
		goto out;
	}

	rc = dn_socket_get_next(seq, v);
	if (rc)
		goto out;
	read_unlock_bh(&dn_hash_lock);
out:
	++*pos;
	return rc;
}

static void dn_socket_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		read_unlock_bh(&dn_hash_lock);
}

#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)

static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
{
	int i;

	switch (le16_to_cpu(dn->sdn_objnamel)) {
	case 0:
		sprintf(buf, "%d", dn->sdn_objnum);
		break;
	default:
		for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
			buf[i] = dn->sdn_objname[i];
			if (IS_NOT_PRINTABLE(buf[i]))
				buf[i] = '.';
		}
		buf[i] = 0;
	}
}

static char *dn_state2asc(unsigned char state)
{
	switch (state) {
	case DN_O:
		return "OPEN";
	case DN_CR:
		return "  CR";
	case DN_DR:
		return "  DR";
	case DN_DRC:
		return " DRC";
	case DN_CC:
		return "  CC";
	case DN_CI:
		return "  CI";
	case DN_NR:
		return "  NR";
	case DN_NC:
		return "  NC";
	case DN_CD:
		return "  CD";
	case DN_RJ:
		return "  RJ";
	case DN_RUN:
		return " RUN";
	case DN_DI:
		return "  DI";
	case DN_DIC:
		return " DIC";
	case DN_DN:
		return "  DN";
	case DN_CL:
		return "  CL";
	case DN_CN:
		return "  CN";
	}

	return "????";
}

static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	char buf1[DN_ASCBUF_LEN];
	char buf2[DN_ASCBUF_LEN];
	char local_object[DN_MAXOBJL+3];
	char remote_object[DN_MAXOBJL+3];

	dn_printable_object(&scp->addr, local_object);
	dn_printable_object(&scp->peer, remote_object);

	seq_printf(seq,
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
		   "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
		   scp->addrloc,
		   scp->numdat,
		   scp->numoth,
		   scp->ackxmt_dat,
		   scp->ackxmt_oth,
		   scp->flowloc_sw,
		   local_object,
		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
		   scp->addrrem,
		   scp->numdat_rcv,
		   scp->numoth_rcv,
		   scp->ackrcv_dat,
		   scp->ackrcv_oth,
		   scp->flowrem_sw,
		   remote_object,
		   dn_state2asc(scp->state),
		   ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
}

static int dn_socket_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Local                                              Remote\n");
	} else {
		dn_socket_format_entry(seq, v);
	}
	return 0;
}

static const struct seq_operations dn_socket_seq_ops = {
	.start	= dn_socket_seq_start,
	.next	= dn_socket_seq_next,
	.stop	= dn_socket_seq_stop,
	.show	= dn_socket_seq_show,
};
#endif

static const struct net_proto_family dn_family_ops = {
	.family =	AF_DECnet,
	.create =	dn_create,
	.owner	=	THIS_MODULE,
};

static const struct proto_ops dn_proto_ops = {
	.family =	AF_DECnet,
	.owner =	THIS_MODULE,
	.release =	dn_release,
	.bind =		dn_bind,
	.connect =	dn_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	dn_accept,
	.getname =	dn_getname,
	.poll =		dn_poll,
	.ioctl =	dn_ioctl,
	.listen =	dn_listen,
	.shutdown =	dn_shutdown,
	.setsockopt =	dn_setsockopt,
	.getsockopt =	dn_getsockopt,
	.sendmsg =	dn_sendmsg,
	.recvmsg =	dn_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_DECnet);

static const char banner[] __initconst = KERN_INFO
"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";

static int __init decnet_init(void)
{
	int rc;

	printk(banner);

	rc = proto_register(&dn_proto, 1);
	if (rc != 0)
		goto out;

	dn_neigh_init();
	dn_dev_init();
	dn_route_init();
	dn_fib_init();

	sock_register(&dn_family_ops);
	dev_add_pack(&dn_dix_packet_type);
	register_netdevice_notifier(&dn_dev_notifier);

	proc_create_seq_private("decnet", 0444, init_net.proc_net,
				&dn_socket_seq_ops, sizeof(struct dn_iter_state),
				NULL);
	dn_register_sysctl();
out:
	return rc;
}
module_init(decnet_init);

/*
 * Prevent the DECnet module from being unloaded until it's fixed properly.
 * Requires an audit of the code to check for memory leaks and
 * initialisation problems etc.
 */
#if 0
static void __exit decnet_exit(void)
{
	sock_unregister(AF_DECnet);
	rtnl_unregister_all(PF_DECnet);
	dev_remove_pack(&dn_dix_packet_type);

	dn_unregister_sysctl();

	unregister_netdevice_notifier(&dn_dev_notifier);

	dn_route_cleanup();
	dn_dev_cleanup();
	dn_neigh_cleanup();
	dn_fib_cleanup();

	remove_proc_entry("decnet", init_net.proc_net);

	proto_unregister(&dn_proto);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
module_exit(decnet_exit);
#endif