1 /*
2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012 Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include "core.h"
38 #include "port.h"
40 #include <linux/export.h>
41 #include <net/sock.h>
43 #define SS_LISTENING -1 /* socket is listening */
44 #define SS_READY -2 /* socket is connectionless */
46 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
48 struct tipc_sock {
49 struct sock sk;
50 struct tipc_port *p;
51 struct tipc_portid peer_name;
52 unsigned int conn_timeout;
55 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
56 #define tipc_sk_port(sk) (tipc_sk(sk)->p)
58 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
59 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
60 static void wakeupdispatch(struct tipc_port *tport);
61 static void tipc_data_ready(struct sock *sk, int len);
62 static void tipc_write_space(struct sock *sk);
63 static int release(struct socket *sock);
64 static int accept(struct socket *sock, struct socket *new_sock, int flags);
66 static const struct proto_ops packet_ops;
67 static const struct proto_ops stream_ops;
68 static const struct proto_ops msg_ops;
70 static struct proto tipc_proto;
71 static struct proto tipc_proto_kern;
73 static int sockets_enabled;
76 * Revised TIPC socket locking policy:
78 * Most socket operations take the standard socket lock when they start
79 * and hold it until they finish (or until they need to sleep). Acquiring
80 * this lock grants the owner exclusive access to the fields of the socket
81 * data structures, with the exception of the backlog queue. A few socket
82 * operations can be done without taking the socket lock because they only
83 * read socket information that never changes during the life of the socket.
85 * Socket operations may acquire the lock for the associated TIPC port if they
86 * need to perform an operation on the port. If any routine needs to acquire
87 * both the socket lock and the port lock it must take the socket lock first
88 * to avoid the risk of deadlock.
90 * The dispatcher handling incoming messages cannot grab the socket lock in
91 * the standard fashion, since it is invoked at BH level and cannot block.
92 * Instead, it checks to see if the socket lock is currently owned by someone,
93 * and either handles the message itself or adds it to the socket's backlog
94 * queue; in the latter case the queued message is processed once the process
95 * owning the socket lock releases it.
97 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
98 * the problem of a blocked socket operation preventing any other operations
99 * from occurring. However, applications must be careful if they have
100 * multiple threads trying to send (or receive) on the same socket, as these
101 * operations might interfere with each other. For example, doing a connect
102 * and a receive at the same time might allow the receive to consume the
103 * ACK message meant for the connect. While additional work could be done
104 * to try to overcome this, it doesn't seem worthwhile at present.
106 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
107 * that another operation that must be performed in a non-blocking manner is
108 * not delayed for very long because the lock has already been taken.
110 * NOTE: This code assumes that certain fields of a port/socket pair are
111 * constant over its lifetime; such fields can be examined without taking
112 * the socket lock and/or port lock, and do not need to be re-read even
113 * after resuming processing after waiting. These fields include:
114 * - socket type
115 * - pointer to socket sk structure (aka tipc_sock structure)
116 * - pointer to port structure
117 * - port reference
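 *
 * A minimal sketch of the lock-ordering rule described above, assuming a
 * routine that must touch both the socket and its port ("do_port_work" is a
 * hypothetical placeholder, not a function in this file):
 *
 *	lock_sock(sk);                          socket lock first
 *	spin_lock_bh(tipc_sk_port(sk)->lock);   then the port lock
 *	do_port_work(tipc_sk_port(sk));
 *	spin_unlock_bh(tipc_sk_port(sk)->lock);
 *	release_sock(sk);                       backlogged messages run here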
121 * advance_rx_queue - discard first buffer in socket receive queue
123 * Caller must hold socket lock
125 static void advance_rx_queue(struct sock *sk)
127 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
131 * reject_rx_queue - reject all buffers in socket receive queue
133 * Caller must hold socket lock
135 static void reject_rx_queue(struct sock *sk)
137 struct sk_buff *buf;
139 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
140 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
144 * tipc_sk_create - create a TIPC socket
145 * @net: network namespace (must be default network)
146 * @sock: pre-allocated socket structure
147 * @protocol: protocol indicator (must be 0)
148 * @kern: caused by kernel or by userspace?
150 * This routine creates additional data structures used by the TIPC socket,
151 * initializes them, and links them together.
153 * Returns 0 on success, errno otherwise
155 static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
156 int kern)
158 const struct proto_ops *ops;
159 socket_state state;
160 struct sock *sk;
161 struct tipc_port *tp_ptr;
163 /* Validate arguments */
164 if (unlikely(protocol != 0))
165 return -EPROTONOSUPPORT;
167 switch (sock->type) {
168 case SOCK_STREAM:
169 ops = &stream_ops;
170 state = SS_UNCONNECTED;
171 break;
172 case SOCK_SEQPACKET:
173 ops = &packet_ops;
174 state = SS_UNCONNECTED;
175 break;
176 case SOCK_DGRAM:
177 case SOCK_RDM:
178 ops = &msg_ops;
179 state = SS_READY;
180 break;
181 default:
182 return -EPROTOTYPE;
185 /* Allocate socket's protocol area */
186 if (!kern)
187 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
188 else
189 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
191 if (sk == NULL)
192 return -ENOMEM;
194 /* Allocate TIPC port for socket to use */
195 tp_ptr = tipc_createport(sk, &dispatch, &wakeupdispatch,
196 TIPC_LOW_IMPORTANCE);
197 if (unlikely(!tp_ptr)) {
198 sk_free(sk);
199 return -ENOMEM;
202 /* Finish initializing socket data structures */
203 sock->ops = ops;
204 sock->state = state;
206 sock_init_data(sock, sk);
207 sk->sk_backlog_rcv = backlog_rcv;
208 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
209 sk->sk_data_ready = tipc_data_ready;
210 sk->sk_write_space = tipc_write_space;
211 tipc_sk(sk)->p = tp_ptr;
212 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
214 spin_unlock_bh(tp_ptr->lock);
216 if (sock->state == SS_READY) {
217 tipc_set_portunreturnable(tp_ptr->ref, 1);
218 if (sock->type == SOCK_DGRAM)
219 tipc_set_portunreliable(tp_ptr->ref, 1);
222 return 0;
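/*
 * Illustrative userspace sketch (not compiled here; assumes the standard
 * <sys/socket.h> and <linux/tipc.h> definitions): how the socket types
 * handled by tipc_sk_create() map onto the AF_TIPC API.
 *
 *	int rdm    = socket(AF_TIPC, SOCK_RDM, 0);        connectionless, reliable
 *	int dgram  = socket(AF_TIPC, SOCK_DGRAM, 0);      connectionless, unreliable
 *	int packet = socket(AF_TIPC, SOCK_SEQPACKET, 0);  connection-oriented records
 *	int stream = socket(AF_TIPC, SOCK_STREAM, 0);     connection-oriented byte stream
 */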
226 * tipc_sock_create_local - create TIPC socket from inside TIPC module
227 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
229 * We cannot use sock_create_kern here because it bumps the module user count.
230 * Since the socket owner and creator is the same module, we must make sure
231 * that the module count remains zero for module-local sockets; otherwise
232 * we cannot do rmmod.
234 * Returns 0 on success, errno otherwise
236 int tipc_sock_create_local(int type, struct socket **res)
238 int rc;
240 rc = sock_create_lite(AF_TIPC, type, 0, res);
241 if (rc < 0) {
242 pr_err("Failed to create kernel socket\n");
243 return rc;
245 tipc_sk_create(&init_net, *res, 0, 1);
247 return 0;
251 * tipc_sock_release_local - release socket created by tipc_sock_create_local
252 * @sock: the socket to be released.
254 * Module reference count is not incremented when such sockets are created,
255 * so we must keep it from being decremented when they are released.
257 void tipc_sock_release_local(struct socket *sock)
259 release(sock);
260 sock->ops = NULL;
261 sock_release(sock);
265 * tipc_sock_accept_local - accept a connection on a socket created
266 * with tipc_sock_create_local. Use this function to avoid inadvertently
267 * incrementing the module reference count.
269 * @sock: the accepting socket
270 * @newsock: reference to the new socket to be created
271 * @flags: socket flags
274 int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
275 int flags)
277 struct sock *sk = sock->sk;
278 int ret;
280 ret = sock_create_lite(sk->sk_family, sk->sk_type,
281 sk->sk_protocol, newsock);
282 if (ret < 0)
283 return ret;
285 ret = accept(sock, *newsock, flags);
286 if (ret < 0) {
287 sock_release(*newsock);
288 return ret;
290 (*newsock)->ops = sock->ops;
291 return ret;
295 * release - destroy a TIPC socket
296 * @sock: socket to destroy
298 * This routine cleans up any messages that are still queued on the socket.
299 * For DGRAM and RDM socket types, all queued messages are rejected.
300 * For SEQPACKET and STREAM socket types, the first message is rejected
301 * and any others are discarded. (If the first message on a STREAM socket
302 * is partially-read, it is discarded and the next one is rejected instead.)
304 * NOTE: Rejected messages are not necessarily returned to the sender! They
305 * are returned or discarded according to the "destination droppable" setting
306 * specified for the message by the sender.
308 * Returns 0 on success, errno otherwise
310 static int release(struct socket *sock)
312 struct sock *sk = sock->sk;
313 struct tipc_port *tport;
314 struct sk_buff *buf;
315 int res;
318 * Exit if socket isn't fully initialized (occurs when a failed accept()
319 * releases a pre-allocated child socket that was never used)
321 if (sk == NULL)
322 return 0;
324 tport = tipc_sk_port(sk);
325 lock_sock(sk);
328 * Reject all unreceived messages, except on an active connection
329 * (which disconnects locally & sends a 'FIN+' to peer)
331 while (sock->state != SS_DISCONNECTING) {
332 buf = __skb_dequeue(&sk->sk_receive_queue);
333 if (buf == NULL)
334 break;
335 if (TIPC_SKB_CB(buf)->handle != NULL)
336 kfree_skb(buf);
337 else {
338 if ((sock->state == SS_CONNECTING) ||
339 (sock->state == SS_CONNECTED)) {
340 sock->state = SS_DISCONNECTING;
341 tipc_disconnect(tport->ref);
343 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
348 * Delete TIPC port; this ensures no more messages are queued
349 * (also disconnects an active connection & sends a 'FIN-' to peer)
351 res = tipc_deleteport(tport);
353 /* Discard any remaining (connection-based) messages in receive queue */
354 __skb_queue_purge(&sk->sk_receive_queue);
356 /* Reject any messages that accumulated in backlog queue */
357 sock->state = SS_DISCONNECTING;
358 release_sock(sk);
360 sock_put(sk);
361 sock->sk = NULL;
363 return res;
367 * bind - associate or disassociate TIPC name(s) with a socket
368 * @sock: socket structure
369 * @uaddr: socket address describing name(s) and desired operation
370 * @uaddr_len: size of socket address data structure
372 * Name and name sequence binding is indicated using a positive scope value;
373 * a negative scope value unbinds the specified name. Specifying no name
374 * (i.e. a socket address length of 0) unbinds all names from the socket.
376 * Returns 0 on success, errno otherwise
378 * NOTE: This routine doesn't need to take the socket lock since it doesn't
379 * access any non-constant socket information.
381 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
383 struct sock *sk = sock->sk;
384 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
385 struct tipc_port *tport = tipc_sk_port(sock->sk);
386 int res = -EINVAL;
388 lock_sock(sk);
389 if (unlikely(!uaddr_len)) {
390 res = tipc_withdraw(tport, 0, NULL);
391 goto exit;
394 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
395 res = -EINVAL;
396 goto exit;
398 if (addr->family != AF_TIPC) {
399 res = -EAFNOSUPPORT;
400 goto exit;
403 if (addr->addrtype == TIPC_ADDR_NAME)
404 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
405 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
406 res = -EAFNOSUPPORT;
407 goto exit;
410 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
411 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
412 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
413 res = -EACCES;
414 goto exit;
417 res = (addr->scope > 0) ?
418 tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
419 tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
420 exit:
421 release_sock(sk);
422 return res;
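/*
 * Illustrative userspace sketch (not compiled here; assumes <linux/tipc.h>;
 * service type 1000 is hypothetical): publishing a name sequence, then
 * withdrawing it by repeating the call with a negated scope, as described
 * in the comment above bind().
 *
 *	struct sockaddr_tipc a = {
 *		.family             = AF_TIPC,
 *		.addrtype           = TIPC_ADDR_NAMESEQ,
 *		.scope              = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq.type  = 1000,
 *		.addr.nameseq.lower = 0,
 *		.addr.nameseq.upper = 99,
 *	};
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	publish
 *	a.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));	withdraw
 */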
426 * get_name - get port ID of socket or peer socket
427 * @sock: socket structure
428 * @uaddr: area for returned socket address
429 * @uaddr_len: area for returned length of socket address
430 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
432 * Returns 0 on success, errno otherwise
434 * NOTE: This routine doesn't need to take the socket lock since it only
435 * accesses socket information that is unchanging (or which changes in
436 * a completely predictable manner).
438 static int get_name(struct socket *sock, struct sockaddr *uaddr,
439 int *uaddr_len, int peer)
441 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
442 struct tipc_sock *tsock = tipc_sk(sock->sk);
444 memset(addr, 0, sizeof(*addr));
445 if (peer) {
446 if ((sock->state != SS_CONNECTED) &&
447 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
448 return -ENOTCONN;
449 addr->addr.id.ref = tsock->peer_name.ref;
450 addr->addr.id.node = tsock->peer_name.node;
451 } else {
452 addr->addr.id.ref = tsock->p->ref;
453 addr->addr.id.node = tipc_own_addr;
456 *uaddr_len = sizeof(*addr);
457 addr->addrtype = TIPC_ADDR_ID;
458 addr->family = AF_TIPC;
459 addr->scope = 0;
460 addr->addr.name.domain = 0;
462 return 0;
466 * poll - read and possibly block on pollmask
467 * @file: file structure associated with the socket
468 * @sock: socket for which to calculate the poll bits
469 * @wait: poll table supplied by the calling poll implementation
471 * Returns pollmask value
473 * COMMENTARY:
474 * It appears that the usual socket locking mechanisms are not useful here
475 * since the pollmask info is potentially out-of-date the moment this routine
476 * exits. TCP and other protocols seem to rely on higher level poll routines
477 * to handle any preventable race conditions, so TIPC will do the same ...
479 * TIPC sets the returned events as follows:
481 * socket state flags set
482 * ------------ ---------
483 * unconnected no read flags
484 * POLLOUT if port is not congested
486 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
487 * no write flags
489 * connected POLLIN/POLLRDNORM if data in rx queue
490 * POLLOUT if port is not congested
492 * disconnecting POLLIN/POLLRDNORM/POLLHUP
493 * no write flags
495 * listening POLLIN if SYN in rx queue
496 * no write flags
498 * ready POLLIN/POLLRDNORM if data in rx queue
499 * [connectionless] POLLOUT (since port cannot be congested)
501 * IMPORTANT: The fact that a read or write operation is indicated does NOT
502 * imply that the operation will succeed, merely that it should be performed
503 * and will not block.
505 static unsigned int poll(struct file *file, struct socket *sock,
506 poll_table *wait)
508 struct sock *sk = sock->sk;
509 u32 mask = 0;
511 sock_poll_wait(file, sk_sleep(sk), wait);
513 switch ((int)sock->state) {
514 case SS_UNCONNECTED:
515 if (!tipc_sk_port(sk)->congested)
516 mask |= POLLOUT;
517 break;
518 case SS_READY:
519 case SS_CONNECTED:
520 if (!tipc_sk_port(sk)->congested)
521 mask |= POLLOUT;
522 /* fall thru' */
523 case SS_CONNECTING:
524 case SS_LISTENING:
525 if (!skb_queue_empty(&sk->sk_receive_queue))
526 mask |= (POLLIN | POLLRDNORM);
527 break;
528 case SS_DISCONNECTING:
529 mask = (POLLIN | POLLRDNORM | POLLHUP);
530 break;
533 return mask;
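/*
 * Illustrative userspace sketch (not compiled here; "handle_readable" and
 * "handle_writable" are hypothetical helpers): waiting for the events listed
 * in the state table above.
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
 *	int n = poll(&pfd, 1, -1);
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		handle_readable(sd);
 *	if (n > 0 && (pfd.revents & POLLOUT))
 *		handle_writable(sd);
 */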
537 * dest_name_check - verify user is permitted to send to specified port name
538 * @dest: destination address
539 * @m: descriptor for message to be sent
541 * Prevents restricted configuration commands from being issued by
542 * unauthorized users.
544 * Returns 0 if permission is granted, otherwise errno
546 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
548 struct tipc_cfg_msg_hdr hdr;
550 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
551 return 0;
552 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
553 return 0;
554 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
555 return -EACCES;
557 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
558 return -EMSGSIZE;
559 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
560 return -EFAULT;
561 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
562 return -EACCES;
564 return 0;
567 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
569 struct sock *sk = sock->sk;
570 struct tipc_port *tport = tipc_sk_port(sk);
571 DEFINE_WAIT(wait);
572 int done;
574 do {
575 int err = sock_error(sk);
576 if (err)
577 return err;
578 if (sock->state == SS_DISCONNECTING)
579 return -EPIPE;
580 if (!*timeo_p)
581 return -EAGAIN;
582 if (signal_pending(current))
583 return sock_intr_errno(*timeo_p);
585 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
586 done = sk_wait_event(sk, timeo_p, !tport->congested);
587 finish_wait(sk_sleep(sk), &wait);
588 } while (!done);
589 return 0;
593 * send_msg - send message in connectionless manner
594 * @iocb: if NULL, indicates that socket lock is already held
595 * @sock: socket structure
596 * @m: message to send
597 * @total_len: length of message
599 * Message must have a destination specified explicitly.
600 * Used for SOCK_RDM and SOCK_DGRAM messages,
601 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
602 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
604 * Returns the number of bytes sent on success, or errno otherwise
606 static int send_msg(struct kiocb *iocb, struct socket *sock,
607 struct msghdr *m, size_t total_len)
609 struct sock *sk = sock->sk;
610 struct tipc_port *tport = tipc_sk_port(sk);
611 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
612 int needs_conn;
613 long timeo;
614 int res = -EINVAL;
616 if (unlikely(!dest))
617 return -EDESTADDRREQ;
618 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
619 (dest->family != AF_TIPC)))
620 return -EINVAL;
621 if (total_len > TIPC_MAX_USER_MSG_SIZE)
622 return -EMSGSIZE;
624 if (iocb)
625 lock_sock(sk);
627 needs_conn = (sock->state != SS_READY);
628 if (unlikely(needs_conn)) {
629 if (sock->state == SS_LISTENING) {
630 res = -EPIPE;
631 goto exit;
633 if (sock->state != SS_UNCONNECTED) {
634 res = -EISCONN;
635 goto exit;
637 if (tport->published) {
638 res = -EOPNOTSUPP;
639 goto exit;
641 if (dest->addrtype == TIPC_ADDR_NAME) {
642 tport->conn_type = dest->addr.name.name.type;
643 tport->conn_instance = dest->addr.name.name.instance;
646 /* Abort any pending connection attempts (very unlikely) */
647 reject_rx_queue(sk);
650 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
651 do {
652 if (dest->addrtype == TIPC_ADDR_NAME) {
653 res = dest_name_check(dest, m);
654 if (res)
655 break;
656 res = tipc_send2name(tport->ref,
657 &dest->addr.name.name,
658 dest->addr.name.domain,
659 m->msg_iov,
660 total_len);
661 } else if (dest->addrtype == TIPC_ADDR_ID) {
662 res = tipc_send2port(tport->ref,
663 &dest->addr.id,
664 m->msg_iov,
665 total_len);
666 } else if (dest->addrtype == TIPC_ADDR_MCAST) {
667 if (needs_conn) {
668 res = -EOPNOTSUPP;
669 break;
671 res = dest_name_check(dest, m);
672 if (res)
673 break;
674 res = tipc_multicast(tport->ref,
675 &dest->addr.nameseq,
676 m->msg_iov,
677 total_len);
679 if (likely(res != -ELINKCONG)) {
680 if (needs_conn && (res >= 0))
681 sock->state = SS_CONNECTING;
682 break;
684 res = tipc_wait_for_sndmsg(sock, &timeo);
685 if (res)
686 break;
687 } while (1);
689 exit:
690 if (iocb)
691 release_sock(sk);
692 return res;
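/*
 * Illustrative userspace sketch (not compiled here; assumes <linux/tipc.h>;
 * service type 1000 and instance 1 are hypothetical): a connectionless send
 * to a port name, the common case handled by send_msg() above.
 *
 *	struct sockaddr_tipc dst = {
 *		.family                  = AF_TIPC,
 *		.addrtype                = TIPC_ADDR_NAME,
 *		.addr.name.name.type     = 1000,
 *		.addr.name.name.instance = 1,
 *		.addr.name.domain        = 0,	lookup in entire zone
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */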
695 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
697 struct sock *sk = sock->sk;
698 struct tipc_port *tport = tipc_sk_port(sk);
699 DEFINE_WAIT(wait);
700 int done;
702 do {
703 int err = sock_error(sk);
704 if (err)
705 return err;
706 if (sock->state == SS_DISCONNECTING)
707 return -EPIPE;
708 else if (sock->state != SS_CONNECTED)
709 return -ENOTCONN;
710 if (!*timeo_p)
711 return -EAGAIN;
712 if (signal_pending(current))
713 return sock_intr_errno(*timeo_p);
715 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
716 done = sk_wait_event(sk, timeo_p,
717 (!tport->congested || !tport->connected));
718 finish_wait(sk_sleep(sk), &wait);
719 } while (!done);
720 return 0;
724 * send_packet - send a connection-oriented message
725 * @iocb: if NULL, indicates that socket lock is already held
726 * @sock: socket structure
727 * @m: message to send
728 * @total_len: length of message
730 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
732 * Returns the number of bytes sent on success, or errno otherwise
734 static int send_packet(struct kiocb *iocb, struct socket *sock,
735 struct msghdr *m, size_t total_len)
737 struct sock *sk = sock->sk;
738 struct tipc_port *tport = tipc_sk_port(sk);
739 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
740 int res = -EINVAL;
741 long timeo;
743 /* Handle implied connection establishment */
744 if (unlikely(dest))
745 return send_msg(iocb, sock, m, total_len);
747 if (total_len > TIPC_MAX_USER_MSG_SIZE)
748 return -EMSGSIZE;
750 if (iocb)
751 lock_sock(sk);
753 if (unlikely(sock->state != SS_CONNECTED)) {
754 if (sock->state == SS_DISCONNECTING)
755 res = -EPIPE;
756 else
757 res = -ENOTCONN;
758 goto exit;
761 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
762 do {
763 res = tipc_send(tport->ref, m->msg_iov, total_len);
764 if (likely(res != -ELINKCONG))
765 break;
766 res = tipc_wait_for_sndpkt(sock, &timeo);
767 if (res)
768 break;
769 } while (1);
770 exit:
771 if (iocb)
772 release_sock(sk);
773 return res;
777 * send_stream - send stream-oriented data
778 * @iocb: (unused)
779 * @sock: socket structure
780 * @m: data to send
781 * @total_len: total length of data to be sent
783 * Used for SOCK_STREAM data.
785 * Returns the number of bytes sent on success (or partial success),
786 * or errno if no data sent
788 static int send_stream(struct kiocb *iocb, struct socket *sock,
789 struct msghdr *m, size_t total_len)
791 struct sock *sk = sock->sk;
792 struct tipc_port *tport = tipc_sk_port(sk);
793 struct msghdr my_msg;
794 struct iovec my_iov;
795 struct iovec *curr_iov;
796 int curr_iovlen;
797 char __user *curr_start;
798 u32 hdr_size;
799 int curr_left;
800 int bytes_to_send;
801 int bytes_sent;
802 int res;
804 lock_sock(sk);
806 /* Handle special cases where there is no connection */
807 if (unlikely(sock->state != SS_CONNECTED)) {
808 if (sock->state == SS_UNCONNECTED)
809 res = send_packet(NULL, sock, m, total_len);
810 else
811 res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
812 goto exit;
815 if (unlikely(m->msg_name)) {
816 res = -EISCONN;
817 goto exit;
820 if (total_len > (unsigned int)INT_MAX) {
821 res = -EMSGSIZE;
822 goto exit;
826 * Send each iovec entry using one or more messages
828 * Note: This algorithm is good for the most likely case
829 * (i.e. one large iovec entry), but could be improved to pass sets
830 * of small iovec entries into send_packet().
832 curr_iov = m->msg_iov;
833 curr_iovlen = m->msg_iovlen;
834 my_msg.msg_iov = &my_iov;
835 my_msg.msg_iovlen = 1;
836 my_msg.msg_flags = m->msg_flags;
837 my_msg.msg_name = NULL;
838 bytes_sent = 0;
840 hdr_size = msg_hdr_sz(&tport->phdr);
842 while (curr_iovlen--) {
843 curr_start = curr_iov->iov_base;
844 curr_left = curr_iov->iov_len;
846 while (curr_left) {
847 bytes_to_send = tport->max_pkt - hdr_size;
848 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
849 bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
850 if (curr_left < bytes_to_send)
851 bytes_to_send = curr_left;
852 my_iov.iov_base = curr_start;
853 my_iov.iov_len = bytes_to_send;
854 res = send_packet(NULL, sock, &my_msg, bytes_to_send);
855 if (res < 0) {
856 if (bytes_sent)
857 res = bytes_sent;
858 goto exit;
860 curr_left -= bytes_to_send;
861 curr_start += bytes_to_send;
862 bytes_sent += bytes_to_send;
865 curr_iov++;
867 res = bytes_sent;
868 exit:
869 release_sock(sk);
870 return res;
874 * auto_connect - complete connection setup to a remote port
875 * @sock: socket structure
876 * @msg: peer's response message
878 * Returns 0 on success, errno otherwise
880 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
882 struct tipc_sock *tsock = tipc_sk(sock->sk);
883 struct tipc_port *p_ptr;
885 tsock->peer_name.ref = msg_origport(msg);
886 tsock->peer_name.node = msg_orignode(msg);
887 p_ptr = tipc_port_deref(tsock->p->ref);
888 if (!p_ptr)
889 return -EINVAL;
891 __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
893 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
894 return -EINVAL;
895 msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
896 sock->state = SS_CONNECTED;
897 return 0;
901 * set_orig_addr - capture sender's address for received message
902 * @m: descriptor for message info
903 * @msg: received message header
905 * Note: Address is not captured if not requested by receiver.
907 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
909 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
911 if (addr) {
912 addr->family = AF_TIPC;
913 addr->addrtype = TIPC_ADDR_ID;
914 memset(&addr->addr, 0, sizeof(addr->addr));
915 addr->addr.id.ref = msg_origport(msg);
916 addr->addr.id.node = msg_orignode(msg);
917 addr->addr.name.domain = 0; /* could leave uninitialized */
918 addr->scope = 0; /* could leave uninitialized */
919 m->msg_namelen = sizeof(struct sockaddr_tipc);
924 * anc_data_recv - optionally capture ancillary data for received message
925 * @m: descriptor for message info
926 * @msg: received message header
927 * @tport: TIPC port associated with message
929 * Note: Ancillary data is not captured if not requested by receiver.
931 * Returns 0 if successful, otherwise errno
933 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
934 struct tipc_port *tport)
936 u32 anc_data[3];
937 u32 err;
938 u32 dest_type;
939 int has_name;
940 int res;
942 if (likely(m->msg_controllen == 0))
943 return 0;
945 /* Optionally capture errored message object(s) */
946 err = msg ? msg_errcode(msg) : 0;
947 if (unlikely(err)) {
948 anc_data[0] = err;
949 anc_data[1] = msg_data_sz(msg);
950 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
951 if (res)
952 return res;
953 if (anc_data[1]) {
954 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
955 msg_data(msg));
956 if (res)
957 return res;
961 /* Optionally capture message destination object */
962 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
963 switch (dest_type) {
964 case TIPC_NAMED_MSG:
965 has_name = 1;
966 anc_data[0] = msg_nametype(msg);
967 anc_data[1] = msg_namelower(msg);
968 anc_data[2] = msg_namelower(msg);
969 break;
970 case TIPC_MCAST_MSG:
971 has_name = 1;
972 anc_data[0] = msg_nametype(msg);
973 anc_data[1] = msg_namelower(msg);
974 anc_data[2] = msg_nameupper(msg);
975 break;
976 case TIPC_CONN_MSG:
977 has_name = (tport->conn_type != 0);
978 anc_data[0] = tport->conn_type;
979 anc_data[1] = tport->conn_instance;
980 anc_data[2] = tport->conn_instance;
981 break;
982 default:
983 has_name = 0;
985 if (has_name) {
986 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
987 if (res)
988 return res;
991 return 0;
994 static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
996 struct sock *sk = sock->sk;
997 DEFINE_WAIT(wait);
998 int err;
1000 for (;;) {
1001 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1002 if (skb_queue_empty(&sk->sk_receive_queue)) {
1003 if (sock->state == SS_DISCONNECTING) {
1004 err = -ENOTCONN;
1005 break;
1007 release_sock(sk);
1008 timeo = schedule_timeout(timeo);
1009 lock_sock(sk);
1011 err = 0;
1012 if (!skb_queue_empty(&sk->sk_receive_queue))
1013 break;
1014 err = sock_intr_errno(timeo);
1015 if (signal_pending(current))
1016 break;
1017 err = -EAGAIN;
1018 if (!timeo)
1019 break;
1021 finish_wait(sk_sleep(sk), &wait);
1022 return err;
1026 * recv_msg - receive packet-oriented message
1027 * @iocb: (unused)
1028 * @m: descriptor for message info
1029 * @buf_len: total size of user buffer area
1030 * @flags: receive flags
1032 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1033 * If the complete message doesn't fit in user area, truncate it.
1035 * Returns size of returned message data, errno otherwise
1037 static int recv_msg(struct kiocb *iocb, struct socket *sock,
1038 struct msghdr *m, size_t buf_len, int flags)
1040 struct sock *sk = sock->sk;
1041 struct tipc_port *tport = tipc_sk_port(sk);
1042 struct sk_buff *buf;
1043 struct tipc_msg *msg;
1044 long timeo;
1045 unsigned int sz;
1046 u32 err;
1047 int res;
1049 /* Catch invalid receive requests */
1050 if (unlikely(!buf_len))
1051 return -EINVAL;
1053 lock_sock(sk);
1055 if (unlikely(sock->state == SS_UNCONNECTED)) {
1056 res = -ENOTCONN;
1057 goto exit;
1060 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1061 restart:
1063 /* Look for a message in receive queue; wait if necessary */
1064 res = tipc_wait_for_rcvmsg(sock, timeo);
1065 if (res)
1066 goto exit;
1068 /* Look at first message in receive queue */
1069 buf = skb_peek(&sk->sk_receive_queue);
1070 msg = buf_msg(buf);
1071 sz = msg_data_sz(msg);
1072 err = msg_errcode(msg);
1074 /* Discard an empty non-errored message & try again */
1075 if ((!sz) && (!err)) {
1076 advance_rx_queue(sk);
1077 goto restart;
1080 /* Capture sender's address (optional) */
1081 set_orig_addr(m, msg);
1083 /* Capture ancillary data (optional) */
1084 res = anc_data_recv(m, msg, tport);
1085 if (res)
1086 goto exit;
1088 /* Capture message data (if valid) & compute return value (always) */
1089 if (!err) {
1090 if (unlikely(buf_len < sz)) {
1091 sz = buf_len;
1092 m->msg_flags |= MSG_TRUNC;
1094 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
1095 m->msg_iov, sz);
1096 if (res)
1097 goto exit;
1098 res = sz;
1099 } else {
1100 if ((sock->state == SS_READY) ||
1101 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1102 res = 0;
1103 else
1104 res = -ECONNRESET;
1107 /* Consume received message (optional) */
1108 if (likely(!(flags & MSG_PEEK))) {
1109 if ((sock->state != SS_READY) &&
1110 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1111 tipc_acknowledge(tport->ref, tport->conn_unacked);
1112 advance_rx_queue(sk);
1114 exit:
1115 release_sock(sk);
1116 return res;
1120 * recv_stream - receive stream-oriented data
1121 * @iocb: (unused)
1122 * @m: descriptor for message info
1123 * @buf_len: total size of user buffer area
1124 * @flags: receive flags
1126 * Used for SOCK_STREAM messages only. If not enough data is available
1127 * it will optionally wait for more; never truncates data.
1129 * Returns size of returned message data, errno otherwise
1131 static int recv_stream(struct kiocb *iocb, struct socket *sock,
1132 struct msghdr *m, size_t buf_len, int flags)
1134 struct sock *sk = sock->sk;
1135 struct tipc_port *tport = tipc_sk_port(sk);
1136 struct sk_buff *buf;
1137 struct tipc_msg *msg;
1138 long timeo;
1139 unsigned int sz;
1140 int sz_to_copy, target, needed;
1141 int sz_copied = 0;
1142 u32 err;
1143 int res = 0;
1145 /* Catch invalid receive attempts */
1146 if (unlikely(!buf_len))
1147 return -EINVAL;
1149 lock_sock(sk);
1151 if (unlikely(sock->state == SS_UNCONNECTED)) {
1152 res = -ENOTCONN;
1153 goto exit;
1156 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1157 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1159 restart:
1160 /* Look for a message in receive queue; wait if necessary */
1161 res = tipc_wait_for_rcvmsg(sock, timeo);
1162 if (res)
1163 goto exit;
1165 /* Look at first message in receive queue */
1166 buf = skb_peek(&sk->sk_receive_queue);
1167 msg = buf_msg(buf);
1168 sz = msg_data_sz(msg);
1169 err = msg_errcode(msg);
1171 /* Discard an empty non-errored message & try again */
1172 if ((!sz) && (!err)) {
1173 advance_rx_queue(sk);
1174 goto restart;
1177 /* Optionally capture sender's address & ancillary data of first msg */
1178 if (sz_copied == 0) {
1179 set_orig_addr(m, msg);
1180 res = anc_data_recv(m, msg, tport);
1181 if (res)
1182 goto exit;
1185 /* Capture message data (if valid) & compute return value (always) */
1186 if (!err) {
1187 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1189 sz -= offset;
1190 needed = (buf_len - sz_copied);
1191 sz_to_copy = (sz <= needed) ? sz : needed;
1193 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
1194 m->msg_iov, sz_to_copy);
1195 if (res)
1196 goto exit;
1198 sz_copied += sz_to_copy;
1200 if (sz_to_copy < sz) {
1201 if (!(flags & MSG_PEEK))
1202 TIPC_SKB_CB(buf)->handle =
1203 (void *)(unsigned long)(offset + sz_to_copy);
1204 goto exit;
1206 } else {
1207 if (sz_copied != 0)
1208 goto exit; /* can't add error msg to valid data */
1210 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1211 res = 0;
1212 else
1213 res = -ECONNRESET;
1216 /* Consume received message (optional) */
1217 if (likely(!(flags & MSG_PEEK))) {
1218 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1219 tipc_acknowledge(tport->ref, tport->conn_unacked);
1220 advance_rx_queue(sk);
1223 /* Loop around if more data is required */
1224 if ((sz_copied < buf_len) && /* didn't get all requested data */
1225 (!skb_queue_empty(&sk->sk_receive_queue) ||
1226 (sz_copied < target)) && /* and more is ready or required */
1227 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1228 (!err)) /* and haven't reached a FIN */
1229 goto restart;
1231 exit:
1232 release_sock(sk);
1233 return sz_copied ? sz_copied : res;
1237 * tipc_write_space - wake up thread if port congestion is released
1238 * @sk: socket
1240 static void tipc_write_space(struct sock *sk)
1242 struct socket_wq *wq;
1244 rcu_read_lock();
1245 wq = rcu_dereference(sk->sk_wq);
1246 if (wq_has_sleeper(wq))
1247 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1248 POLLWRNORM | POLLWRBAND);
1249 rcu_read_unlock();
1253 * tipc_data_ready - wake up threads to indicate messages have been received
1254 * @sk: socket
1255 * @len: the length of messages
1257 static void tipc_data_ready(struct sock *sk, int len)
1259 struct socket_wq *wq;
1261 rcu_read_lock();
1262 wq = rcu_dereference(sk->sk_wq);
1263 if (wq_has_sleeper(wq))
1264 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1265 POLLRDNORM | POLLRDBAND);
1266 rcu_read_unlock();
1270 * filter_connect - Handle all incoming messages for a connection-based socket
1271 * @tsock: TIPC socket
1272 * @msg: message
1274 * Returns a TIPC error status code and sets the socket error status code
1275 * when an error is encountered
1277 static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1279 struct socket *sock = tsock->sk.sk_socket;
1280 struct tipc_msg *msg = buf_msg(*buf);
1281 struct sock *sk = &tsock->sk;
1282 u32 retval = TIPC_ERR_NO_PORT;
1283 int res;
1285 if (msg_mcast(msg))
1286 return retval;
1288 switch ((int)sock->state) {
1289 case SS_CONNECTED:
1290 /* Accept only connection-based messages sent by peer */
1291 if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
1292 if (unlikely(msg_errcode(msg))) {
1293 sock->state = SS_DISCONNECTING;
1294 __tipc_disconnect(tsock->p);
1296 retval = TIPC_OK;
1298 break;
1299 case SS_CONNECTING:
1300 /* Accept only ACK or NACK message */
1301 if (unlikely(msg_errcode(msg))) {
1302 sock->state = SS_DISCONNECTING;
1303 sk->sk_err = ECONNREFUSED;
1304 retval = TIPC_OK;
1305 break;
1308 if (unlikely(!msg_connected(msg)))
1309 break;
1311 res = auto_connect(sock, msg);
1312 if (res) {
1313 sock->state = SS_DISCONNECTING;
1314 sk->sk_err = -res;
1315 retval = TIPC_OK;
1316 break;
1319 /* If an incoming message is an 'ACK-', it should be
1320 * discarded here because it doesn't contain useful
1321 * data. In addition, we should try to wake up
1322 * connect() routine if sleeping.
1324 if (msg_data_sz(msg) == 0) {
1325 kfree_skb(*buf);
1326 *buf = NULL;
1327 if (waitqueue_active(sk_sleep(sk)))
1328 wake_up_interruptible(sk_sleep(sk));
1330 retval = TIPC_OK;
1331 break;
1332 case SS_LISTENING:
1333 case SS_UNCONNECTED:
1334 /* Accept only SYN message */
1335 if (!msg_connected(msg) && !(msg_errcode(msg)))
1336 retval = TIPC_OK;
1337 break;
1338 case SS_DISCONNECTING:
1339 break;
1340 default:
1341 pr_err("Unknown socket state %u\n", sock->state);
1343 return retval;
1347 * rcvbuf_limit - get proper overload limit of socket receive queue
1348 * @sk: socket
1349 * @buf: message
1351 * For all connection oriented messages, irrespective of importance,
1352 * the default overload value (i.e. 67MB) is set as limit.
1354 * For all connectionless messages, by default new queue limits are
1355 * as below:
1357 * TIPC_LOW_IMPORTANCE (4 MB)
1358 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1359 * TIPC_HIGH_IMPORTANCE (16 MB)
1360 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1362 * Returns overload limit according to corresponding message importance
1364 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1366 struct tipc_msg *msg = buf_msg(buf);
1368 if (msg_connected(msg))
1369 return sysctl_tipc_rmem[2];
1371 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1372 msg_importance(msg);
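/*
 * Worked example of the limit computed above (a sketch; the actual base
 * value comes from sysctl_tipc_rmem): with sk_rcvbuf = 32 MB and
 * TIPC_CRITICAL_IMPORTANCE = 3, the connectionless limit is
 * (32 MB >> 3) << msg_importance, i.e.:
 *
 *	TIPC_LOW_IMPORTANCE      (0):  4 MB
 *	TIPC_MEDIUM_IMPORTANCE   (1):  8 MB
 *	TIPC_HIGH_IMPORTANCE     (2): 16 MB
 *	TIPC_CRITICAL_IMPORTANCE (3): 32 MB
 *
 * which matches the defaults listed in the rcvbuf_limit() comment.
 */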
1376 * filter_rcv - validate incoming message
1377 * @sk: socket
1378 * @buf: message
1380 * Enqueues message on receive queue if acceptable; optionally handles
1381 * disconnect indication for a connected socket.
1383 * Called with socket lock already taken; port lock may also be taken.
1385 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1387 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1389 struct socket *sock = sk->sk_socket;
1390 struct tipc_msg *msg = buf_msg(buf);
1391 unsigned int limit = rcvbuf_limit(sk, buf);
1392 u32 res = TIPC_OK;
1394 /* Reject message if it is wrong sort of message for socket */
1395 if (msg_type(msg) > TIPC_DIRECT_MSG)
1396 return TIPC_ERR_NO_PORT;
1398 if (sock->state == SS_READY) {
1399 if (msg_connected(msg))
1400 return TIPC_ERR_NO_PORT;
1401 } else {
1402 res = filter_connect(tipc_sk(sk), &buf);
1403 if (res != TIPC_OK || buf == NULL)
1404 return res;
1407 /* Reject message if there isn't room to queue it */
1408 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
1409 return TIPC_ERR_OVERLOAD;
1411 /* Enqueue message */
1412 TIPC_SKB_CB(buf)->handle = NULL;
1413 __skb_queue_tail(&sk->sk_receive_queue, buf);
1414 skb_set_owner_r(buf, sk);
1416 sk->sk_data_ready(sk, 0);
1417 return TIPC_OK;
1421 * backlog_rcv - handle incoming message from backlog queue
1422 * @sk: socket
1423 * @buf: message
1425 * Caller must hold socket lock, but not port lock.
1427 * Returns 0
1429 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1431 u32 res;
1433 res = filter_rcv(sk, buf);
1434 if (res)
1435 tipc_reject_msg(buf, res);
1436 return 0;
1440 * dispatch - handle incoming message
1441 * @tport: TIPC port that received message
1442 * @buf: message
1444 * Called with port lock already taken.
1446 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1448 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1450 struct sock *sk = tport->sk;
1451 u32 res;
1454 * Process message if socket is unlocked; otherwise add to backlog queue
1456 * This code is based on sk_receive_skb(), but must be distinct from it
1457 * since a TIPC-specific filter/reject mechanism is utilized
1459 bh_lock_sock(sk);
1460 if (!sock_owned_by_user(sk)) {
1461 res = filter_rcv(sk, buf);
1462 } else {
1463 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
1464 res = TIPC_ERR_OVERLOAD;
1465 else
1466 res = TIPC_OK;
1468 bh_unlock_sock(sk);
1470 return res;
1474 * wakeupdispatch - wake up port after congestion
1475 * @tport: port to wakeup
1477 * Called with port lock already taken.
1479 static void wakeupdispatch(struct tipc_port *tport)
1481 struct sock *sk = tport->sk;
1483 sk->sk_write_space(sk);
1486 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1488 struct sock *sk = sock->sk;
1489 DEFINE_WAIT(wait);
1490 int done;
1492 do {
1493 int err = sock_error(sk);
1494 if (err)
1495 return err;
1496 if (!*timeo_p)
1497 return -ETIMEDOUT;
1498 if (signal_pending(current))
1499 return sock_intr_errno(*timeo_p);
1501 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1502 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1503 finish_wait(sk_sleep(sk), &wait);
1504 } while (!done);
1505 return 0;
1509 * connect - establish a connection to another TIPC port
1510 * @sock: socket structure
1511 * @dest: socket address for destination port
1512 * @destlen: size of socket address data structure
1513 * @flags: file-related flags associated with socket
1515 * Returns 0 on success, errno otherwise
1517 static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1518 int flags)
1520 struct sock *sk = sock->sk;
1521 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1522 struct msghdr m = {NULL,};
1523 long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
1524 socket_state previous;
1525 int res;
1527 lock_sock(sk);
1529 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1530 if (sock->state == SS_READY) {
1531 res = -EOPNOTSUPP;
1532 goto exit;
1536 * Reject connection attempt using multicast address
1538 * Note: send_msg() validates the rest of the address fields,
1539 * so there's no need to do it here
1541 if (dst->addrtype == TIPC_ADDR_MCAST) {
1542 res = -EINVAL;
1543 goto exit;
1546 previous = sock->state;
1547 switch (sock->state) {
1548 case SS_UNCONNECTED:
1549 /* Send a 'SYN-' to destination */
1550 m.msg_name = dest;
1551 m.msg_namelen = destlen;
1553 /* In the non-blocking case, set MSG_DONTWAIT so that
1554 * send_msg() never blocks.
1556 if (!timeout)
1557 m.msg_flags = MSG_DONTWAIT;
1559 res = send_msg(NULL, sock, &m, 0);
1560 if ((res < 0) && (res != -EWOULDBLOCK))
1561 goto exit;
1563 /* Just entered SS_CONNECTING state; the only
1564 * difference is that the return value in the non-blocking
1565 * case is EINPROGRESS, rather than EALREADY.
1567 res = -EINPROGRESS;
1568 case SS_CONNECTING:
1569 if (previous == SS_CONNECTING)
1570 res = -EALREADY;
1571 if (!timeout)
1572 goto exit;
1573 timeout = msecs_to_jiffies(timeout);
1574 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1575 res = tipc_wait_for_connect(sock, &timeout);
1576 break;
1577 case SS_CONNECTED:
1578 res = -EISCONN;
1579 break;
1580 default:
1581 res = -EINVAL;
1582 break;
1584 exit:
1585 release_sock(sk);
1586 return res;
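/*
 * Illustrative userspace sketch (not compiled here; "dst" is a sockaddr_tipc
 * as in the earlier examples): a non-blocking connect as handled above, where
 * the initial call returns EINPROGRESS and completion (or POLLHUP on refusal)
 * is then detected with poll().
 *
 *	fcntl(sd, F_SETFL, fcntl(sd, F_GETFL, 0) | O_NONBLOCK);
 *	if (connect(sd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = sd, .events = POLLOUT };
 *		poll(&pfd, 1, 8000);	up to the 8 s default timeout
 *	}
 */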
1590 * listen - allow socket to listen for incoming connections
1591 * @sock: socket structure
1592 * @len: (unused)
1594 * Returns 0 on success, errno otherwise
1596 static int listen(struct socket *sock, int len)
1598 struct sock *sk = sock->sk;
1599 int res;
1601 lock_sock(sk);
1603 if (sock->state != SS_UNCONNECTED)
1604 res = -EINVAL;
1605 else {
1606 sock->state = SS_LISTENING;
1607 res = 0;
1610 release_sock(sk);
1611 return res;
1614 static int tipc_wait_for_accept(struct socket *sock, long timeo)
1616 struct sock *sk = sock->sk;
1617 DEFINE_WAIT(wait);
1618 int err;
1620 /* True wake-one mechanism for incoming connections: only
1621 * one process gets woken up, not the 'whole herd'.
1622 * Since we do not 'race & poll' for established sockets
1623 * anymore, the common case will execute the loop only once.
1625 for (;;) {
1626 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1627 TASK_INTERRUPTIBLE);
1628 if (skb_queue_empty(&sk->sk_receive_queue)) {
1629 release_sock(sk);
1630 timeo = schedule_timeout(timeo);
1631 lock_sock(sk);
1633 err = 0;
1634 if (!skb_queue_empty(&sk->sk_receive_queue))
1635 break;
1636 err = -EINVAL;
1637 if (sock->state != SS_LISTENING)
1638 break;
1639 err = sock_intr_errno(timeo);
1640 if (signal_pending(current))
1641 break;
1642 err = -EAGAIN;
1643 if (!timeo)
1644 break;
1646 finish_wait(sk_sleep(sk), &wait);
1647 return err;
1651 * accept - wait for connection request
1652 * @sock: listening socket
1653 * @newsock: new socket that is to be connected
1654 * @flags: file-related flags associated with socket
1656 * Returns 0 on success, errno otherwise
1658 static int accept(struct socket *sock, struct socket *new_sock, int flags)
1660 struct sock *new_sk, *sk = sock->sk;
1661 struct sk_buff *buf;
1662 struct tipc_sock *new_tsock;
1663 struct tipc_port *new_tport;
1664 struct tipc_msg *msg;
1665 u32 new_ref;
1666 long timeo;
1667 int res;
1669 lock_sock(sk);
1671 if (sock->state != SS_LISTENING) {
1672 res = -EINVAL;
1673 goto exit;
1676 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1677 res = tipc_wait_for_accept(sock, timeo);
1678 if (res)
1679 goto exit;
1681 buf = skb_peek(&sk->sk_receive_queue);
1683 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
1684 if (res)
1685 goto exit;
1687 new_sk = new_sock->sk;
1688 new_tsock = tipc_sk(new_sk);
1689 new_tport = new_tsock->p;
1690 new_ref = new_tport->ref;
1691 msg = buf_msg(buf);
1693 /* we lock on new_sk; but lockdep sees the lock on sk */
1694 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
1697 * Reject any stray messages received by new socket
1698 * before the socket lock was taken (very, very unlikely)
1700 reject_rx_queue(new_sk);
1702 /* Connect new socket to its peer */
1703 new_tsock->peer_name.ref = msg_origport(msg);
1704 new_tsock->peer_name.node = msg_orignode(msg);
1705 tipc_connect(new_ref, &new_tsock->peer_name);
1706 new_sock->state = SS_CONNECTED;
1708 tipc_set_portimportance(new_ref, msg_importance(msg));
1709 if (msg_named(msg)) {
1710 new_tport->conn_type = msg_nametype(msg);
1711 new_tport->conn_instance = msg_nameinst(msg);
1715 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1716 * Respond to 'SYN+' by queuing it on new socket.
1718 if (!msg_data_sz(msg)) {
1719 struct msghdr m = {NULL,};
1721 advance_rx_queue(sk);
1722 send_packet(NULL, new_sock, &m, 0);
1723 } else {
1724 __skb_dequeue(&sk->sk_receive_queue);
1725 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1726 skb_set_owner_r(buf, new_sk);
1728 release_sock(new_sk);
1730 exit:
1731 release_sock(sk);
1732 return res;
1736 * shutdown - shutdown socket connection
1737 * @sock: socket structure
1738 * @how: direction to close (must be SHUT_RDWR)
1740 * Terminates connection (if necessary), then purges socket's receive queue.
1742 * Returns 0 on success, errno otherwise
1744 static int shutdown(struct socket *sock, int how)
1746 struct sock *sk = sock->sk;
1747 struct tipc_port *tport = tipc_sk_port(sk);
1748 struct sk_buff *buf;
1749 int res;
1751 if (how != SHUT_RDWR)
1752 return -EINVAL;
1754 lock_sock(sk);
1756 switch (sock->state) {
1757 case SS_CONNECTING:
1758 case SS_CONNECTED:
1760 restart:
1761 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1762 buf = __skb_dequeue(&sk->sk_receive_queue);
1763 if (buf) {
1764 if (TIPC_SKB_CB(buf)->handle != NULL) {
1765 kfree_skb(buf);
1766 goto restart;
1768 tipc_disconnect(tport->ref);
1769 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1770 } else {
1771 tipc_shutdown(tport->ref);
1774 sock->state = SS_DISCONNECTING;
1776 /* fall through */
1778 case SS_DISCONNECTING:
1780 /* Discard any unreceived messages */
1781 __skb_queue_purge(&sk->sk_receive_queue);
1783 /* Wake up anyone sleeping in poll */
1784 sk->sk_state_change(sk);
1785 res = 0;
1786 break;
1788 default:
1789 res = -ENOTCONN;
1792 release_sock(sk);
1793 return res;
1797 * setsockopt - set socket option
1798 * @sock: socket structure
1799 * @lvl: option level
1800 * @opt: option identifier
1801 * @ov: pointer to new option value
1802 * @ol: length of option value
1804 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1805 * (to ease compatibility).
1807 * Returns 0 on success, errno otherwise
1809 static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
1810 unsigned int ol)
1812 struct sock *sk = sock->sk;
1813 struct tipc_port *tport = tipc_sk_port(sk);
1814 u32 value;
1815 int res;
1817 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1818 return 0;
1819 if (lvl != SOL_TIPC)
1820 return -ENOPROTOOPT;
1821 if (ol < sizeof(value))
1822 return -EINVAL;
1823 res = get_user(value, (u32 __user *)ov);
1824 if (res)
1825 return res;
1827 lock_sock(sk);
1829 switch (opt) {
1830 case TIPC_IMPORTANCE:
1831 res = tipc_set_portimportance(tport->ref, value);
1832 break;
1833 case TIPC_SRC_DROPPABLE:
1834 if (sock->type != SOCK_STREAM)
1835 res = tipc_set_portunreliable(tport->ref, value);
1836 else
1837 res = -ENOPROTOOPT;
1838 break;
1839 case TIPC_DEST_DROPPABLE:
1840 res = tipc_set_portunreturnable(tport->ref, value);
1841 break;
1842 case TIPC_CONN_TIMEOUT:
1843 tipc_sk(sk)->conn_timeout = value;
1844 /* no need to set "res", since already 0 at this point */
1845 break;
1846 default:
1847 res = -EINVAL;
1850 release_sock(sk);
1852 return res;
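/*
 * Illustrative userspace sketch (not compiled here; assumes <linux/tipc.h>;
 * the values shown are arbitrary examples): setting the options handled by
 * setsockopt() above.
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 10000;	connect timeout in milliseconds
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE,   &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */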
1856 * getsockopt - get socket option
1857 * @sock: socket structure
1858 * @lvl: option level
1859 * @opt: option identifier
1860 * @ov: receptacle for option value
1861 * @ol: receptacle for length of option value
1863 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1864 * (to ease compatibility).
1866 * Returns 0 on success, errno otherwise
1868 static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
1869 int __user *ol)
1871 struct sock *sk = sock->sk;
1872 struct tipc_port *tport = tipc_sk_port(sk);
1873 int len;
1874 u32 value;
1875 int res;
1877 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1878 return put_user(0, ol);
1879 if (lvl != SOL_TIPC)
1880 return -ENOPROTOOPT;
1881 res = get_user(len, ol);
1882 if (res)
1883 return res;
1885 lock_sock(sk);
1887 switch (opt) {
1888 case TIPC_IMPORTANCE:
1889 res = tipc_portimportance(tport->ref, &value);
1890 break;
1891 case TIPC_SRC_DROPPABLE:
1892 res = tipc_portunreliable(tport->ref, &value);
1893 break;
1894 case TIPC_DEST_DROPPABLE:
1895 res = tipc_portunreturnable(tport->ref, &value);
1896 break;
1897 case TIPC_CONN_TIMEOUT:
1898 value = tipc_sk(sk)->conn_timeout;
1899 /* no need to set "res", since already 0 at this point */
1900 break;
1901 case TIPC_NODE_RECVQ_DEPTH:
1902 value = 0; /* was tipc_queue_size, now obsolete */
1903 break;
1904 case TIPC_SOCK_RECVQ_DEPTH:
1905 value = skb_queue_len(&sk->sk_receive_queue);
1906 break;
1907 default:
1908 res = -EINVAL;
1911 release_sock(sk);
1913 if (res)
1914 return res; /* "get" failed */
1916 if (len < sizeof(value))
1917 return -EINVAL;
1919 if (copy_to_user(ov, &value, sizeof(value)))
1920 return -EFAULT;
1922 return put_user(sizeof(value), ol);
1925 /* Protocol switches for the various types of TIPC sockets */
1927 static const struct proto_ops msg_ops = {
1928 .owner = THIS_MODULE,
1929 .family = AF_TIPC,
1930 .release = release,
1931 .bind = bind,
1932 .connect = connect,
1933 .socketpair = sock_no_socketpair,
1934 .accept = sock_no_accept,
1935 .getname = get_name,
1936 .poll = poll,
1937 .ioctl = sock_no_ioctl,
1938 .listen = sock_no_listen,
1939 .shutdown = shutdown,
1940 .setsockopt = setsockopt,
1941 .getsockopt = getsockopt,
1942 .sendmsg = send_msg,
1943 .recvmsg = recv_msg,
1944 .mmap = sock_no_mmap,
1945 .sendpage = sock_no_sendpage
1948 static const struct proto_ops packet_ops = {
1949 .owner = THIS_MODULE,
1950 .family = AF_TIPC,
1951 .release = release,
1952 .bind = bind,
1953 .connect = connect,
1954 .socketpair = sock_no_socketpair,
1955 .accept = accept,
1956 .getname = get_name,
1957 .poll = poll,
1958 .ioctl = sock_no_ioctl,
1959 .listen = listen,
1960 .shutdown = shutdown,
1961 .setsockopt = setsockopt,
1962 .getsockopt = getsockopt,
1963 .sendmsg = send_packet,
1964 .recvmsg = recv_msg,
1965 .mmap = sock_no_mmap,
1966 .sendpage = sock_no_sendpage
1969 static const struct proto_ops stream_ops = {
1970 .owner = THIS_MODULE,
1971 .family = AF_TIPC,
1972 .release = release,
1973 .bind = bind,
1974 .connect = connect,
1975 .socketpair = sock_no_socketpair,
1976 .accept = accept,
1977 .getname = get_name,
1978 .poll = poll,
1979 .ioctl = sock_no_ioctl,
1980 .listen = listen,
1981 .shutdown = shutdown,
1982 .setsockopt = setsockopt,
1983 .getsockopt = getsockopt,
1984 .sendmsg = send_stream,
1985 .recvmsg = recv_stream,
1986 .mmap = sock_no_mmap,
1987 .sendpage = sock_no_sendpage
1990 static const struct net_proto_family tipc_family_ops = {
1991 .owner = THIS_MODULE,
1992 .family = AF_TIPC,
1993 .create = tipc_sk_create
1996 static struct proto tipc_proto = {
1997 .name = "TIPC",
1998 .owner = THIS_MODULE,
1999 .obj_size = sizeof(struct tipc_sock),
2000 .sysctl_rmem = sysctl_tipc_rmem
2003 static struct proto tipc_proto_kern = {
2004 .name = "TIPC",
2005 .obj_size = sizeof(struct tipc_sock),
2006 .sysctl_rmem = sysctl_tipc_rmem
2010 * tipc_socket_init - initialize TIPC socket interface
2012 * Returns 0 on success, errno otherwise
2014 int tipc_socket_init(void)
2016 int res;
2018 res = proto_register(&tipc_proto, 1);
2019 if (res) {
2020 pr_err("Failed to register TIPC protocol type\n");
2021 goto out;
2024 res = sock_register(&tipc_family_ops);
2025 if (res) {
2026 pr_err("Failed to register TIPC socket type\n");
2027 proto_unregister(&tipc_proto);
2028 goto out;
2031 sockets_enabled = 1;
2032 out:
2033 return res;
2037 * tipc_socket_stop - stop TIPC socket interface
2039 void tipc_socket_stop(void)
2041 if (!sockets_enabled)
2042 return;
2044 sockets_enabled = 0;
2045 sock_unregister(tipc_family_ops.family);
2046 proto_unregister(&tipc_proto);