1 /*
2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49 #include "trace.h"
51 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
52 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
53 #define TIPC_FWD_MSG 1
54 #define TIPC_MAX_PORT 0xffffffff
55 #define TIPC_MIN_PORT 1
56 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
58 enum {
59 TIPC_LISTEN = TCP_LISTEN,
60 TIPC_ESTABLISHED = TCP_ESTABLISHED,
61 TIPC_OPEN = TCP_CLOSE,
62 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
63 TIPC_CONNECTING = TCP_SYN_SENT,
64 };
66 struct sockaddr_pair {
67 struct sockaddr_tipc sock;
68 struct sockaddr_tipc member;
69 };
71 /**
72 * struct tipc_sock - TIPC socket structure
73 * @sk: socket - interacts with 'port' and with user via the socket API
74 * @conn_type: TIPC type used when connection was established
75 * @conn_instance: TIPC instance used when connection was established
76 * @published: non-zero if port has one or more associated names
77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
78 * @portid: unique port identity in TIPC socket hash table
79 * @phdr: preformatted message header used when sending messages
80 * @cong_links: list of congested links
81 * @publications: list of publications for port
83 * @pub_count: total # of publications port has made during its lifetime
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94 struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 atomic_t dupl_rcvcnt;
106 u16 conn_timeout;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 bool group_is_open;
120 };
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 bool kern);
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
147 static u32 tsk_own_node(struct tipc_sock *tsk)
149 return msg_prevnode(&tsk->phdr);
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
154 return msg_destnode(&tsk->phdr);
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
159 return msg_destport(&tsk->phdr);
162 static bool tsk_unreliable(struct tipc_sock *tsk)
164 return msg_src_droppable(&tsk->phdr) != 0;
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
174 return msg_dest_droppable(&tsk->phdr) != 0;
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
182 static int tsk_importance(struct tipc_sock *tsk)
184 return msg_importance(&tsk->phdr);
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
190 return -EINVAL;
191 msg_set_importance(&tsk->phdr, (u32)imp);
192 return 0;
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
197 return container_of(sk, struct tipc_sock, sk);
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
202 return tsk->snt_unacked > tsk->snd_win;
205 static u16 tsk_blocks(int len)
207 return ((len / FLOWCTL_BLK_SZ) + 1);
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213 */
214 static u16 tsk_adv_blocks(int len)
216 return len / FLOWCTL_BLK_SZ / 4;
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by peer we
221 * fall back to message based flow control, incrementing the counter
222 */
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 return 1;
228 }
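/* Editorial note (not part of the original file): a worked example of the
 * block counting above, assuming FLOWCTL_BLK_SZ is 1024 bytes as defined in
 * msg.h at the time of writing:
 *
 *	tsk_blocks(3000)      == 3000 / 1024 + 1   == 3  blocks charged against
 *	                                                  the send/receive window
 *	tsk_adv_blocks(66000) == 66000 / 1024 / 4  == 16 blocks advertised for a
 *	                                                  66000-byte buffer; the
 *	                                                  divide-by-4 absorbs the
 *	                                                  truesize(len)/len ratio
 *	tsk_inc(tsk, 3000)    == 3 if the peer supports TIPC_BLOCK_FLOWCTL,
 *	                         1 (one message) otherwise
 */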
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
233 * Caller must hold socket lock
235 static void tsk_advance_rx_queue(struct sock *sk)
237 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
238 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
241 /* tipc_sk_respond() : send response message back to sender
243 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
245 u32 selector;
246 u32 dnode;
247 u32 onode = tipc_own_addr(sock_net(sk));
249 if (!tipc_msg_reverse(onode, &skb, err))
250 return;
252 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
253 dnode = msg_destnode(buf_msg(skb));
254 selector = msg_origport(buf_msg(skb));
255 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
259 * tsk_rej_rx_queue - reject all buffers in socket receive queue
261 * Caller must hold socket lock
263 static void tsk_rej_rx_queue(struct sock *sk)
265 struct sk_buff *skb;
267 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
268 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
271 static bool tipc_sk_connected(struct sock *sk)
273 return sk->sk_state == TIPC_ESTABLISHED;
276 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
277 * @sk: socket
278 *
279 * Returns true if connectionless, false otherwise
280 */
281 static bool tipc_sk_type_connectionless(struct sock *sk)
283 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
286 /* tsk_peer_msg - verify if message was sent by connected port's peer
288 * Handles cases where the node's network address has changed from
289 * the default of <0.0.0> to its configured setting.
291 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
293 struct sock *sk = &tsk->sk;
294 u32 self = tipc_own_addr(sock_net(sk));
295 u32 peer_port = tsk_peer_port(tsk);
296 u32 orig_node, peer_node;
298 if (unlikely(!tipc_sk_connected(sk)))
299 return false;
301 if (unlikely(msg_origport(msg) != peer_port))
302 return false;
304 orig_node = msg_orignode(msg);
305 peer_node = tsk_peer_node(tsk);
307 if (likely(orig_node == peer_node))
308 return true;
310 if (!orig_node && peer_node == self)
311 return true;
313 if (!peer_node && orig_node == self)
314 return true;
316 return false;
319 /* tipc_set_sk_state - set the sk_state of the socket
320 * @sk: socket
322 * Caller must hold socket lock
324 * Returns 0 on success, errno otherwise
326 static int tipc_set_sk_state(struct sock *sk, int state)
328 int oldsk_state = sk->sk_state;
329 int res = -EINVAL;
331 switch (state) {
332 case TIPC_OPEN:
333 res = 0;
334 break;
335 case TIPC_LISTEN:
336 case TIPC_CONNECTING:
337 if (oldsk_state == TIPC_OPEN)
338 res = 0;
339 break;
340 case TIPC_ESTABLISHED:
341 if (oldsk_state == TIPC_CONNECTING ||
342 oldsk_state == TIPC_OPEN)
343 res = 0;
344 break;
345 case TIPC_DISCONNECTING:
346 if (oldsk_state == TIPC_CONNECTING ||
347 oldsk_state == TIPC_ESTABLISHED)
348 res = 0;
349 break;
352 if (!res)
353 sk->sk_state = state;
355 return res;
356 }
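/* Editorial summary (not part of the original file), derived from the switch
 * above; any combination not listed leaves the state unchanged and returns
 * -EINVAL:
 *
 *	any state                         -> TIPC_OPEN
 *	TIPC_OPEN                         -> TIPC_LISTEN, TIPC_CONNECTING
 *	TIPC_OPEN, TIPC_CONNECTING        -> TIPC_ESTABLISHED
 *	TIPC_CONNECTING, TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 */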
358 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
360 struct sock *sk = sock->sk;
361 int err = sock_error(sk);
362 int typ = sock->type;
364 if (err)
365 return err;
366 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
367 if (sk->sk_state == TIPC_DISCONNECTING)
368 return -EPIPE;
369 else if (!tipc_sk_connected(sk))
370 return -ENOTCONN;
372 if (!*timeout)
373 return -EAGAIN;
374 if (signal_pending(current))
375 return sock_intr_errno(*timeout);
377 return 0;
380 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
381 ({ \
382 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
383 struct sock *sk_; \
384 int rc_; \
386 while ((rc_ = !(condition_))) { \
387 /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
388 smp_rmb(); \
389 sk_ = (sock_)->sk; \
390 rc_ = tipc_sk_sock_err((sock_), timeo_); \
391 if (rc_) \
392 break; \
393 add_wait_queue(sk_sleep(sk_), &wait_); \
394 release_sock(sk_); \
395 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
396 sched_annotate_sleep(); \
397 lock_sock(sk_); \
398 remove_wait_queue(sk_sleep(sk_), &wait_); \
400 rc_; \
401 })
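/* Editorial note (not part of the original file): typical use of the macro
 * above, as seen later in this file (e.g. in tipc_sendmcast()):
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 *
 * The socket lock is dropped while sleeping and re-acquired before the
 * condition is re-evaluated, so callers must re-validate any state they
 * cached before the call.
 */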
404 * tipc_sk_create - create a TIPC socket
405 * @net: network namespace (must be default network)
406 * @sock: pre-allocated socket structure
407 * @protocol: protocol indicator (must be 0)
408 * @kern: caused by kernel or by userspace?
410 * This routine creates additional data structures used by the TIPC socket,
411 * initializes them, and links them together.
413 * Returns 0 on success, errno otherwise
415 static int tipc_sk_create(struct net *net, struct socket *sock,
416 int protocol, int kern)
418 const struct proto_ops *ops;
419 struct sock *sk;
420 struct tipc_sock *tsk;
421 struct tipc_msg *msg;
423 /* Validate arguments */
424 if (unlikely(protocol != 0))
425 return -EPROTONOSUPPORT;
427 switch (sock->type) {
428 case SOCK_STREAM:
429 ops = &stream_ops;
430 break;
431 case SOCK_SEQPACKET:
432 ops = &packet_ops;
433 break;
434 case SOCK_DGRAM:
435 case SOCK_RDM:
436 ops = &msg_ops;
437 break;
438 default:
439 return -EPROTOTYPE;
442 /* Allocate socket's protocol area */
443 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
444 if (sk == NULL)
445 return -ENOMEM;
447 tsk = tipc_sk(sk);
448 tsk->max_pkt = MAX_PKT_DEFAULT;
449 INIT_LIST_HEAD(&tsk->publications);
450 INIT_LIST_HEAD(&tsk->cong_links);
451 msg = &tsk->phdr;
453 /* Finish initializing socket data structures */
454 sock->ops = ops;
455 sock_init_data(sock, sk);
456 tipc_set_sk_state(sk, TIPC_OPEN);
457 if (tipc_sk_insert(tsk)) {
458 pr_warn("Socket create failed; port number exhausted\n");
459 return -EINVAL;
462 /* Ensure tsk is visible before we read own_addr. */
463 smp_mb();
465 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
466 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
468 msg_set_origport(msg, tsk->portid);
469 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
470 sk->sk_shutdown = 0;
471 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
472 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
473 sk->sk_data_ready = tipc_data_ready;
474 sk->sk_write_space = tipc_write_space;
475 sk->sk_destruct = tipc_sock_destruct;
476 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
477 tsk->group_is_open = true;
478 atomic_set(&tsk->dupl_rcvcnt, 0);
480 /* Start out with safe limits until we receive an advertised window */
481 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
482 tsk->rcv_win = tsk->snd_win;
484 if (tipc_sk_type_connectionless(sk)) {
485 tsk_set_unreturnable(tsk, true);
486 if (sock->type == SOCK_DGRAM)
487 tsk_set_unreliable(tsk, true);
488 __skb_queue_head_init(&tsk->mc_method.deferredq);
491 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
492 return 0;
493 }
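/* Editorial sketch (not part of the original file): how the four socket types
 * handled above map to a user-space socket() call; the protocol argument must
 * be 0, otherwise the call fails with EPROTONOSUPPORT:
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);        reliable datagram, msg_ops
 *	int dgr = socket(AF_TIPC, SOCK_DGRAM, 0);      unreliable datagram, msg_ops
 *	int pkt = socket(AF_TIPC, SOCK_SEQPACKET, 0);  connection oriented, packet_ops
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0);     connection oriented, stream_ops
 */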
495 static void tipc_sk_callback(struct rcu_head *head)
497 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
499 sock_put(&tsk->sk);
502 /* Caller should hold socket lock for the socket. */
503 static void __tipc_shutdown(struct socket *sock, int error)
505 struct sock *sk = sock->sk;
506 struct tipc_sock *tsk = tipc_sk(sk);
507 struct net *net = sock_net(sk);
508 long timeout = CONN_TIMEOUT_DEFAULT;
509 u32 dnode = tsk_peer_node(tsk);
510 struct sk_buff *skb;
512 /* Prevent hi-prio shutdown msgs from bypassing msgs in link wakeup queue */
513 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
514 !tsk_conn_cong(tsk)));
516 /* Remove any pending SYN message */
517 __skb_queue_purge(&sk->sk_write_queue);
519 /* Reject all unreceived messages, except on an active connection
520 * (which disconnects locally & sends a 'FIN+' to peer).
522 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
523 if (TIPC_SKB_CB(skb)->bytes_read) {
524 kfree_skb(skb);
525 continue;
527 if (!tipc_sk_type_connectionless(sk) &&
528 sk->sk_state != TIPC_DISCONNECTING) {
529 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
530 tipc_node_remove_conn(net, dnode, tsk->portid);
532 tipc_sk_respond(sk, skb, error);
535 if (tipc_sk_type_connectionless(sk))
536 return;
538 if (sk->sk_state != TIPC_DISCONNECTING) {
539 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
540 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
541 tsk_own_node(tsk), tsk_peer_port(tsk),
542 tsk->portid, error);
543 if (skb)
544 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
545 tipc_node_remove_conn(net, dnode, tsk->portid);
546 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
551 * tipc_release - destroy a TIPC socket
552 * @sock: socket to destroy
554 * This routine cleans up any messages that are still queued on the socket.
555 * For DGRAM and RDM socket types, all queued messages are rejected.
556 * For SEQPACKET and STREAM socket types, the first message is rejected
557 * and any others are discarded. (If the first message on a STREAM socket
558 * is partially-read, it is discarded and the next one is rejected instead.)
560 * NOTE: Rejected messages are not necessarily returned to the sender! They
561 * are returned or discarded according to the "destination droppable" setting
562 * specified for the message by the sender.
564 * Returns 0 on success, errno otherwise
566 static int tipc_release(struct socket *sock)
568 struct sock *sk = sock->sk;
569 struct tipc_sock *tsk;
572 * Exit if socket isn't fully initialized (occurs when a failed accept()
573 * releases a pre-allocated child socket that was never used)
575 if (sk == NULL)
576 return 0;
578 tsk = tipc_sk(sk);
579 lock_sock(sk);
581 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
582 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
583 sk->sk_shutdown = SHUTDOWN_MASK;
584 tipc_sk_leave(tsk);
585 tipc_sk_withdraw(tsk, 0, NULL);
586 __skb_queue_purge(&tsk->mc_method.deferredq);
587 sk_stop_timer(sk, &sk->sk_timer);
588 tipc_sk_remove(tsk);
590 sock_orphan(sk);
591 /* Reject any messages that accumulated in backlog queue */
592 release_sock(sk);
593 tipc_dest_list_purge(&tsk->cong_links);
594 tsk->cong_link_cnt = 0;
595 call_rcu(&tsk->rcu, tipc_sk_callback);
596 sock->sk = NULL;
598 return 0;
602 * tipc_bind - associate or disassociate TIPC name(s) with a socket
603 * @sock: socket structure
604 * @uaddr: socket address describing name(s) and desired operation
605 * @uaddr_len: size of socket address data structure
607 * Name and name sequence binding is indicated using a positive scope value;
608 * a negative scope value unbinds the specified name. Specifying no name
609 * (i.e. a socket address length of 0) unbinds all names from the socket.
611 * Returns 0 on success, errno otherwise
613 * NOTE: This routine doesn't need to take the socket lock since it doesn't
614 * access any non-constant socket information.
616 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
617 int uaddr_len)
619 struct sock *sk = sock->sk;
620 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
621 struct tipc_sock *tsk = tipc_sk(sk);
622 int res = -EINVAL;
624 lock_sock(sk);
625 if (unlikely(!uaddr_len)) {
626 res = tipc_sk_withdraw(tsk, 0, NULL);
627 goto exit;
629 if (tsk->group) {
630 res = -EACCES;
631 goto exit;
633 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
634 res = -EINVAL;
635 goto exit;
637 if (addr->family != AF_TIPC) {
638 res = -EAFNOSUPPORT;
639 goto exit;
642 if (addr->addrtype == TIPC_ADDR_NAME)
643 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
644 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
645 res = -EAFNOSUPPORT;
646 goto exit;
649 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
650 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
651 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
652 res = -EACCES;
653 goto exit;
656 res = (addr->scope >= 0) ?
657 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
658 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
659 exit:
660 release_sock(sk);
661 return res;
662 }
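/* Editorial sketch (not part of the original file): publishing and withdrawing
 * a name sequence from user space, matching the scope handling above. The
 * service type 1000 is only an example value above TIPC_RESERVED_TYPES.
 *
 *	struct sockaddr_tipc addr = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));   publish
 *	addr.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));   withdraw
 */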
665 * tipc_getname - get port ID of socket or peer socket
666 * @sock: socket structure
667 * @uaddr: area for returned socket address
668 * @uaddr_len: area for returned length of socket address
669 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
671 * Returns 0 on success, errno otherwise
673 * NOTE: This routine doesn't need to take the socket lock since it only
674 * accesses socket information that is unchanging (or which changes in
675 * a completely predictable manner).
677 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
678 int peer)
680 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
681 struct sock *sk = sock->sk;
682 struct tipc_sock *tsk = tipc_sk(sk);
684 memset(addr, 0, sizeof(*addr));
685 if (peer) {
686 if ((!tipc_sk_connected(sk)) &&
687 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
688 return -ENOTCONN;
689 addr->addr.id.ref = tsk_peer_port(tsk);
690 addr->addr.id.node = tsk_peer_node(tsk);
691 } else {
692 addr->addr.id.ref = tsk->portid;
693 addr->addr.id.node = tipc_own_addr(sock_net(sk));
696 addr->addrtype = TIPC_ADDR_ID;
697 addr->family = AF_TIPC;
698 addr->scope = 0;
699 addr->addr.name.domain = 0;
701 return sizeof(*addr);
705 * tipc_poll - read and possibly block on pollmask
706 * @file: file structure associated with the socket
707 * @sock: socket for which to calculate the poll bits
708 * @wait: poll table
710 * Returns pollmask value
712 * COMMENTARY:
713 * It appears that the usual socket locking mechanisms are not useful here
714 * since the pollmask info is potentially out-of-date the moment this routine
715 * exits. TCP and other protocols seem to rely on higher level poll routines
716 * to handle any preventable race conditions, so TIPC will do the same ...
718 * IMPORTANT: The fact that a read or write operation is indicated does NOT
719 * imply that the operation will succeed, merely that it should be performed
720 * and will not block.
722 static __poll_t tipc_poll(struct file *file, struct socket *sock,
723 poll_table *wait)
725 struct sock *sk = sock->sk;
726 struct tipc_sock *tsk = tipc_sk(sk);
727 __poll_t revents = 0;
729 sock_poll_wait(file, sock, wait);
730 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
732 if (sk->sk_shutdown & RCV_SHUTDOWN)
733 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
734 if (sk->sk_shutdown == SHUTDOWN_MASK)
735 revents |= EPOLLHUP;
737 switch (sk->sk_state) {
738 case TIPC_ESTABLISHED:
739 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
740 revents |= EPOLLOUT;
741 /* fall through */
742 case TIPC_LISTEN:
743 case TIPC_CONNECTING:
744 if (!skb_queue_empty(&sk->sk_receive_queue))
745 revents |= EPOLLIN | EPOLLRDNORM;
746 break;
747 case TIPC_OPEN:
748 if (tsk->group_is_open && !tsk->cong_link_cnt)
749 revents |= EPOLLOUT;
750 if (!tipc_sk_type_connectionless(sk))
751 break;
752 if (skb_queue_empty(&sk->sk_receive_queue))
753 break;
754 revents |= EPOLLIN | EPOLLRDNORM;
755 break;
756 case TIPC_DISCONNECTING:
757 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
758 break;
760 return revents;
764 * tipc_sendmcast - send multicast message
765 * @sock: socket structure
766 * @seq: destination address
767 * @msg: message to send
768 * @dlen: length of data to send
769 * @timeout: timeout to wait for wakeup
771 * Called from function tipc_sendmsg(), which has done all sanity checks
772 * Returns the number of bytes sent on success, or errno
774 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
775 struct msghdr *msg, size_t dlen, long timeout)
777 struct sock *sk = sock->sk;
778 struct tipc_sock *tsk = tipc_sk(sk);
779 struct tipc_msg *hdr = &tsk->phdr;
780 struct net *net = sock_net(sk);
781 int mtu = tipc_bcast_get_mtu(net);
782 struct tipc_mc_method *method = &tsk->mc_method;
783 struct sk_buff_head pkts;
784 struct tipc_nlist dsts;
785 int rc;
787 if (tsk->group)
788 return -EACCES;
790 /* Block or return if any destination link is congested */
791 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
792 if (unlikely(rc))
793 return rc;
795 /* Lookup destination nodes */
796 tipc_nlist_init(&dsts, tipc_own_addr(net));
797 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
798 seq->upper, &dsts);
799 if (!dsts.local && !dsts.remote)
800 return -EHOSTUNREACH;
802 /* Build message header */
803 msg_set_type(hdr, TIPC_MCAST_MSG);
804 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
805 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
806 msg_set_destport(hdr, 0);
807 msg_set_destnode(hdr, 0);
808 msg_set_nametype(hdr, seq->type);
809 msg_set_namelower(hdr, seq->lower);
810 msg_set_nameupper(hdr, seq->upper);
812 /* Build message as chain of buffers */
813 skb_queue_head_init(&pkts);
814 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
816 /* Send message if build was successful */
817 if (unlikely(rc == dlen)) {
818 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
819 TIPC_DUMP_SK_SNDQ, " ");
820 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
821 &tsk->cong_link_cnt);
824 tipc_nlist_purge(&dsts);
826 return rc ? rc : dlen;
827 }
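/* Editorial sketch (not part of the original file): the user-space view of the
 * multicast send path above; a datagram socket sends to a name sequence with
 * addrtype TIPC_ADDR_MCAST (type 1000 is only an example service type):
 *
 *	struct sockaddr_tipc maddr = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&maddr, sizeof(maddr));
 */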
830 * tipc_send_group_msg - send a message to a member in the group
831 * @net: network namespace
832 * @m: message to send
833 * @mb: group member
834 * @dnode: destination node
835 * @dport: destination port
836 * @dlen: total length of message data
838 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
839 struct msghdr *m, struct tipc_member *mb,
840 u32 dnode, u32 dport, int dlen)
842 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
843 struct tipc_mc_method *method = &tsk->mc_method;
844 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
845 struct tipc_msg *hdr = &tsk->phdr;
846 struct sk_buff_head pkts;
847 int mtu, rc;
849 /* Complete message header */
850 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
851 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
852 msg_set_destport(hdr, dport);
853 msg_set_destnode(hdr, dnode);
854 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
856 /* Build message as chain of buffers */
857 skb_queue_head_init(&pkts);
858 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
859 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
860 if (unlikely(rc != dlen))
861 return rc;
863 /* Send message */
864 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
865 if (unlikely(rc == -ELINKCONG)) {
866 tipc_dest_push(&tsk->cong_links, dnode, 0);
867 tsk->cong_link_cnt++;
870 /* Update send window */
871 tipc_group_update_member(mb, blks);
873 /* A broadcast sent within next EXPIRE period must follow same path */
874 method->rcast = true;
875 method->mandatory = true;
876 return dlen;
880 * tipc_send_group_unicast - send message to a member in the group
881 * @sock: socket structure
882 * @m: message to send
883 * @dlen: total length of message data
884 * @timeout: timeout to wait for wakeup
886 * Called from function tipc_sendmsg(), which has done all sanity checks
887 * Returns the number of bytes sent on success, or errno
889 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
890 int dlen, long timeout)
892 struct sock *sk = sock->sk;
893 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
894 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
895 struct tipc_sock *tsk = tipc_sk(sk);
896 struct net *net = sock_net(sk);
897 struct tipc_member *mb = NULL;
898 u32 node, port;
899 int rc;
901 node = dest->addr.id.node;
902 port = dest->addr.id.ref;
903 if (!port && !node)
904 return -EHOSTUNREACH;
906 /* Block or return if destination link or member is congested */
907 rc = tipc_wait_for_cond(sock, &timeout,
908 !tipc_dest_find(&tsk->cong_links, node, 0) &&
909 tsk->group &&
910 !tipc_group_cong(tsk->group, node, port, blks,
911 &mb));
912 if (unlikely(rc))
913 return rc;
915 if (unlikely(!mb))
916 return -EHOSTUNREACH;
918 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
920 return rc ? rc : dlen;
924 * tipc_send_group_anycast - send message to any member with given identity
925 * @sock: socket structure
926 * @m: message to send
927 * @dlen: total length of message data
928 * @timeout: timeout to wait for wakeup
930 * Called from function tipc_sendmsg(), which has done all sanity checks
931 * Returns the number of bytes sent on success, or errno
933 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
934 int dlen, long timeout)
936 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
937 struct sock *sk = sock->sk;
938 struct tipc_sock *tsk = tipc_sk(sk);
939 struct list_head *cong_links = &tsk->cong_links;
940 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
941 struct tipc_msg *hdr = &tsk->phdr;
942 struct tipc_member *first = NULL;
943 struct tipc_member *mbr = NULL;
944 struct net *net = sock_net(sk);
945 u32 node, port, exclude;
946 struct list_head dsts;
947 u32 type, inst, scope;
948 int lookups = 0;
949 int dstcnt, rc;
950 bool cong;
952 INIT_LIST_HEAD(&dsts);
954 type = msg_nametype(hdr);
955 inst = dest->addr.name.name.instance;
956 scope = msg_lookup_scope(hdr);
958 while (++lookups < 4) {
959 exclude = tipc_group_exclude(tsk->group);
961 first = NULL;
963 /* Look for a non-congested destination member, if any */
964 while (1) {
965 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
966 &dstcnt, exclude, false))
967 return -EHOSTUNREACH;
968 tipc_dest_pop(&dsts, &node, &port);
969 cong = tipc_group_cong(tsk->group, node, port, blks,
970 &mbr);
971 if (!cong)
972 break;
973 if (mbr == first)
974 break;
975 if (!first)
976 first = mbr;
979 /* Start over if destination was not in member list */
980 if (unlikely(!mbr))
981 continue;
983 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
984 break;
986 /* Block or return if destination link or member is congested */
987 rc = tipc_wait_for_cond(sock, &timeout,
988 !tipc_dest_find(cong_links, node, 0) &&
989 tsk->group &&
990 !tipc_group_cong(tsk->group, node, port,
991 blks, &mbr));
992 if (unlikely(rc))
993 return rc;
995 /* Send, unless destination disappeared while waiting */
996 if (likely(mbr))
997 break;
1000 if (unlikely(lookups >= 4))
1001 return -EHOSTUNREACH;
1003 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1005 return rc ? rc : dlen;
1009 * tipc_send_group_bcast - send message to all members in communication group
1010 * @sock: socket structure
1011 * @m: message to send
1012 * @dlen: total length of message data
1013 * @timeout: timeout to wait for wakeup
1015 * Called from function tipc_sendmsg(), which has done all sanity checks
1016 * Returns the number of bytes sent on success, or errno
1018 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1019 int dlen, long timeout)
1021 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1022 struct sock *sk = sock->sk;
1023 struct net *net = sock_net(sk);
1024 struct tipc_sock *tsk = tipc_sk(sk);
1025 struct tipc_nlist *dsts;
1026 struct tipc_mc_method *method = &tsk->mc_method;
1027 bool ack = method->mandatory && method->rcast;
1028 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1029 struct tipc_msg *hdr = &tsk->phdr;
1030 int mtu = tipc_bcast_get_mtu(net);
1031 struct sk_buff_head pkts;
1032 int rc = -EHOSTUNREACH;
1034 /* Block or return if any destination link or member is congested */
1035 rc = tipc_wait_for_cond(sock, &timeout,
1036 !tsk->cong_link_cnt && tsk->group &&
1037 !tipc_group_bc_cong(tsk->group, blks));
1038 if (unlikely(rc))
1039 return rc;
1041 dsts = tipc_group_dests(tsk->group);
1042 if (!dsts->local && !dsts->remote)
1043 return -EHOSTUNREACH;
1045 /* Complete message header */
1046 if (dest) {
1047 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1048 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1049 } else {
1050 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1051 msg_set_nameinst(hdr, 0);
1053 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1054 msg_set_destport(hdr, 0);
1055 msg_set_destnode(hdr, 0);
1056 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1058 /* Avoid getting stuck with repeated forced replicasts */
1059 msg_set_grp_bc_ack_req(hdr, ack);
1061 /* Build message as chain of buffers */
1062 skb_queue_head_init(&pkts);
1063 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1064 if (unlikely(rc != dlen))
1065 return rc;
1067 /* Send message */
1068 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1069 if (unlikely(rc))
1070 return rc;
1072 /* Update broadcast sequence number and send windows */
1073 tipc_group_update_bc_members(tsk->group, blks, ack);
1075 /* Broadcast link is now free to choose method for next broadcast */
1076 method->mandatory = false;
1077 method->expires = jiffies;
1079 return dlen;
1083 * tipc_send_group_mcast - send message to all members with given identity
1084 * @sock: socket structure
1085 * @m: message to send
1086 * @dlen: total length of message data
1087 * @timeout: timeout to wait for wakeup
1089 * Called from function tipc_sendmsg(), which has done all sanity checks
1090 * Returns the number of bytes sent on success, or errno
1092 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1093 int dlen, long timeout)
1095 struct sock *sk = sock->sk;
1096 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1097 struct tipc_sock *tsk = tipc_sk(sk);
1098 struct tipc_group *grp = tsk->group;
1099 struct tipc_msg *hdr = &tsk->phdr;
1100 struct net *net = sock_net(sk);
1101 u32 type, inst, scope, exclude;
1102 struct list_head dsts;
1103 u32 dstcnt;
1105 INIT_LIST_HEAD(&dsts);
1107 type = msg_nametype(hdr);
1108 inst = dest->addr.name.name.instance;
1109 scope = msg_lookup_scope(hdr);
1110 exclude = tipc_group_exclude(grp);
1112 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1113 &dstcnt, exclude, true))
1114 return -EHOSTUNREACH;
1116 if (dstcnt == 1) {
1117 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1118 return tipc_send_group_unicast(sock, m, dlen, timeout);
1121 tipc_dest_list_purge(&dsts);
1122 return tipc_send_group_bcast(sock, m, dlen, timeout);
1126 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1127 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1128 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1130 * Multi-threaded: parallel calls with reference to same queues may occur
1132 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1133 struct sk_buff_head *inputq)
1135 u32 self = tipc_own_addr(net);
1136 u32 type, lower, upper, scope;
1137 struct sk_buff *skb, *_skb;
1138 u32 portid, onode;
1139 struct sk_buff_head tmpq;
1140 struct list_head dports;
1141 struct tipc_msg *hdr;
1142 int user, mtyp, hlen;
1143 bool exact;
1145 __skb_queue_head_init(&tmpq);
1146 INIT_LIST_HEAD(&dports);
1148 skb = tipc_skb_peek(arrvq, &inputq->lock);
1149 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1150 hdr = buf_msg(skb);
1151 user = msg_user(hdr);
1152 mtyp = msg_type(hdr);
1153 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1154 onode = msg_orignode(hdr);
1155 type = msg_nametype(hdr);
1157 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1158 spin_lock_bh(&inputq->lock);
1159 if (skb_peek(arrvq) == skb) {
1160 __skb_dequeue(arrvq);
1161 __skb_queue_tail(inputq, skb);
1163 kfree_skb(skb);
1164 spin_unlock_bh(&inputq->lock);
1165 continue;
1168 /* Group messages require exact scope match */
1169 if (msg_in_group(hdr)) {
1170 lower = 0;
1171 upper = ~0;
1172 scope = msg_lookup_scope(hdr);
1173 exact = true;
1174 } else {
1175 /* TIPC_NODE_SCOPE means "any scope" in this context */
1176 if (onode == self)
1177 scope = TIPC_NODE_SCOPE;
1178 else
1179 scope = TIPC_CLUSTER_SCOPE;
1180 exact = false;
1181 lower = msg_namelower(hdr);
1182 upper = msg_nameupper(hdr);
1185 /* Create destination port list: */
1186 tipc_nametbl_mc_lookup(net, type, lower, upper,
1187 scope, exact, &dports);
1189 /* Clone message per destination */
1190 while (tipc_dest_pop(&dports, NULL, &portid)) {
1191 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1192 if (_skb) {
1193 msg_set_destport(buf_msg(_skb), portid);
1194 __skb_queue_tail(&tmpq, _skb);
1195 continue;
1197 pr_warn("Failed to clone mcast rcv buffer\n");
1199 /* Append to inputq if not already done by other thread */
1200 spin_lock_bh(&inputq->lock);
1201 if (skb_peek(arrvq) == skb) {
1202 skb_queue_splice_tail_init(&tmpq, inputq);
1203 kfree_skb(__skb_dequeue(arrvq));
1205 spin_unlock_bh(&inputq->lock);
1206 __skb_queue_purge(&tmpq);
1207 kfree_skb(skb);
1209 tipc_sk_rcv(net, inputq);
1210 }
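/* Editorial note (not part of the original file) on the locking scheme above:
 * several threads may traverse the same arrival queue concurrently. Each
 * thread clones the message once per destination port into a private queue,
 * but only the thread that still finds the original buffer at the head of
 * @arrvq (checked under inputq->lock) splices its clones into @inputq and
 * dequeues the original; any other thread simply drops its redundant clones.
 */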
1213 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
1214 * @tsk: receiving socket
1215 * @skb: pointer to message buffer.
1216 */
1217 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1218 struct sk_buff_head *inputq,
1219 struct sk_buff_head *xmitq)
1221 struct tipc_msg *hdr = buf_msg(skb);
1222 u32 onode = tsk_own_node(tsk);
1223 struct sock *sk = &tsk->sk;
1224 int mtyp = msg_type(hdr);
1225 bool conn_cong;
1227 /* Ignore if connection cannot be validated: */
1228 if (!tsk_peer_msg(tsk, hdr)) {
1229 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1230 goto exit;
1233 if (unlikely(msg_errcode(hdr))) {
1234 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1235 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1236 tsk_peer_port(tsk));
1237 sk->sk_state_change(sk);
1239 /* State change is ignored if socket already awake,
1240 * - convert msg to abort msg and add to inqueue
1242 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1243 msg_set_type(hdr, TIPC_CONN_MSG);
1244 msg_set_size(hdr, BASIC_H_SIZE);
1245 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1246 __skb_queue_tail(inputq, skb);
1247 return;
1250 tsk->probe_unacked = false;
1252 if (mtyp == CONN_PROBE) {
1253 msg_set_type(hdr, CONN_PROBE_REPLY);
1254 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1255 __skb_queue_tail(xmitq, skb);
1256 return;
1257 } else if (mtyp == CONN_ACK) {
1258 conn_cong = tsk_conn_cong(tsk);
1259 tsk->snt_unacked -= msg_conn_ack(hdr);
1260 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1261 tsk->snd_win = msg_adv_win(hdr);
1262 if (conn_cong)
1263 sk->sk_write_space(sk);
1264 } else if (mtyp != CONN_PROBE_REPLY) {
1265 pr_warn("Received unknown CONN_PROTO msg\n");
1267 exit:
1268 kfree_skb(skb);
1272 * tipc_sendmsg - send message in connectionless manner
1273 * @sock: socket structure
1274 * @m: message to send
1275 * @dsz: amount of user data to be sent
1277 * Message must have a destination specified explicitly.
1278 * Used for SOCK_RDM and SOCK_DGRAM messages,
1279 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1280 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1282 * Returns the number of bytes sent on success, or errno otherwise
1284 static int tipc_sendmsg(struct socket *sock,
1285 struct msghdr *m, size_t dsz)
1287 struct sock *sk = sock->sk;
1288 int ret;
1290 lock_sock(sk);
1291 ret = __tipc_sendmsg(sock, m, dsz);
1292 release_sock(sk);
1294 return ret;
1297 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1299 struct sock *sk = sock->sk;
1300 struct net *net = sock_net(sk);
1301 struct tipc_sock *tsk = tipc_sk(sk);
1302 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1303 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1304 struct list_head *clinks = &tsk->cong_links;
1305 bool syn = !tipc_sk_type_connectionless(sk);
1306 struct tipc_group *grp = tsk->group;
1307 struct tipc_msg *hdr = &tsk->phdr;
1308 struct tipc_name_seq *seq;
1309 struct sk_buff_head pkts;
1310 u32 dport, dnode = 0;
1311 u32 type, inst;
1312 int mtu, rc;
1314 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1315 return -EMSGSIZE;
1317 if (likely(dest)) {
1318 if (unlikely(m->msg_namelen < sizeof(*dest)))
1319 return -EINVAL;
1320 if (unlikely(dest->family != AF_TIPC))
1321 return -EINVAL;
1324 if (grp) {
1325 if (!dest)
1326 return tipc_send_group_bcast(sock, m, dlen, timeout);
1327 if (dest->addrtype == TIPC_ADDR_NAME)
1328 return tipc_send_group_anycast(sock, m, dlen, timeout);
1329 if (dest->addrtype == TIPC_ADDR_ID)
1330 return tipc_send_group_unicast(sock, m, dlen, timeout);
1331 if (dest->addrtype == TIPC_ADDR_MCAST)
1332 return tipc_send_group_mcast(sock, m, dlen, timeout);
1333 return -EINVAL;
1336 if (unlikely(!dest)) {
1337 dest = &tsk->peer;
1338 if (!syn && dest->family != AF_TIPC)
1339 return -EDESTADDRREQ;
1342 if (unlikely(syn)) {
1343 if (sk->sk_state == TIPC_LISTEN)
1344 return -EPIPE;
1345 if (sk->sk_state != TIPC_OPEN)
1346 return -EISCONN;
1347 if (tsk->published)
1348 return -EOPNOTSUPP;
1349 if (dest->addrtype == TIPC_ADDR_NAME) {
1350 tsk->conn_type = dest->addr.name.name.type;
1351 tsk->conn_instance = dest->addr.name.name.instance;
1353 msg_set_syn(hdr, 1);
1356 seq = &dest->addr.nameseq;
1357 if (dest->addrtype == TIPC_ADDR_MCAST)
1358 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1360 if (dest->addrtype == TIPC_ADDR_NAME) {
1361 type = dest->addr.name.name.type;
1362 inst = dest->addr.name.name.instance;
1363 dnode = dest->addr.name.domain;
1364 msg_set_type(hdr, TIPC_NAMED_MSG);
1365 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1366 msg_set_nametype(hdr, type);
1367 msg_set_nameinst(hdr, inst);
1368 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1369 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1370 msg_set_destnode(hdr, dnode);
1371 msg_set_destport(hdr, dport);
1372 if (unlikely(!dport && !dnode))
1373 return -EHOSTUNREACH;
1374 } else if (dest->addrtype == TIPC_ADDR_ID) {
1375 dnode = dest->addr.id.node;
1376 msg_set_type(hdr, TIPC_DIRECT_MSG);
1377 msg_set_lookup_scope(hdr, 0);
1378 msg_set_destnode(hdr, dnode);
1379 msg_set_destport(hdr, dest->addr.id.ref);
1380 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1381 } else {
1382 return -EINVAL;
1385 /* Block or return if destination link is congested */
1386 rc = tipc_wait_for_cond(sock, &timeout,
1387 !tipc_dest_find(clinks, dnode, 0));
1388 if (unlikely(rc))
1389 return rc;
1391 skb_queue_head_init(&pkts);
1392 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1393 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1394 if (unlikely(rc != dlen))
1395 return rc;
1396 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
1397 return -ENOMEM;
1399 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1400 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1401 if (unlikely(rc == -ELINKCONG)) {
1402 tipc_dest_push(clinks, dnode, 0);
1403 tsk->cong_link_cnt++;
1404 rc = 0;
1407 if (unlikely(syn && !rc))
1408 tipc_set_sk_state(sk, TIPC_CONNECTING);
1410 return rc ? rc : dlen;
1411 }
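/* Editorial sketch (not part of the original file): a user-space sendto()
 * matching the TIPC_ADDR_NAME branch above; domain 0 leaves the lookup scope
 * to tipc_node2scope() (type 1000 and instance 42 are only example values):
 *
 *	struct sockaddr_tipc daddr = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&daddr, sizeof(daddr));
 */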
1414 * tipc_sendstream - send stream-oriented data
1415 * @sock: socket structure
1416 * @m: data to send
1417 * @dsz: total length of data to be transmitted
1419 * Used for SOCK_STREAM data.
1421 * Returns the number of bytes sent on success (or partial success),
1422 * or errno if no data sent
1424 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1426 struct sock *sk = sock->sk;
1427 int ret;
1429 lock_sock(sk);
1430 ret = __tipc_sendstream(sock, m, dsz);
1431 release_sock(sk);
1433 return ret;
1436 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1438 struct sock *sk = sock->sk;
1439 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1440 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1441 struct tipc_sock *tsk = tipc_sk(sk);
1442 struct tipc_msg *hdr = &tsk->phdr;
1443 struct net *net = sock_net(sk);
1444 struct sk_buff_head pkts;
1445 u32 dnode = tsk_peer_node(tsk);
1446 int send, sent = 0;
1447 int rc = 0;
1449 skb_queue_head_init(&pkts);
1451 if (unlikely(dlen > INT_MAX))
1452 return -EMSGSIZE;
1454 /* Handle implicit connection setup */
1455 if (unlikely(dest)) {
1456 rc = __tipc_sendmsg(sock, m, dlen);
1457 if (dlen && dlen == rc) {
1458 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1459 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1461 return rc;
1464 do {
1465 rc = tipc_wait_for_cond(sock, &timeout,
1466 (!tsk->cong_link_cnt &&
1467 !tsk_conn_cong(tsk) &&
1468 tipc_sk_connected(sk)));
1469 if (unlikely(rc))
1470 break;
1472 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1473 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1474 if (unlikely(rc != send))
1475 break;
1477 trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1478 TIPC_DUMP_SK_SNDQ, " ");
1479 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1480 if (unlikely(rc == -ELINKCONG)) {
1481 tsk->cong_link_cnt = 1;
1482 rc = 0;
1484 if (likely(!rc)) {
1485 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1486 sent += send;
1488 } while (sent < dlen && !rc);
1490 return sent ? sent : rc;
1494 * tipc_send_packet - send a connection-oriented message
1495 * @sock: socket structure
1496 * @m: message to send
1497 * @dsz: length of data to be transmitted
1499 * Used for SOCK_SEQPACKET messages.
1501 * Returns the number of bytes sent on success, or errno otherwise
1503 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1505 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1506 return -EMSGSIZE;
1508 return tipc_sendstream(sock, m, dsz);
1511 /* tipc_sk_finish_conn - complete the setup of a connection
1513 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1514 u32 peer_node)
1516 struct sock *sk = &tsk->sk;
1517 struct net *net = sock_net(sk);
1518 struct tipc_msg *msg = &tsk->phdr;
1520 msg_set_syn(msg, 0);
1521 msg_set_destnode(msg, peer_node);
1522 msg_set_destport(msg, peer_port);
1523 msg_set_type(msg, TIPC_CONN_MSG);
1524 msg_set_lookup_scope(msg, 0);
1525 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1527 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1528 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1529 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1530 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1531 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1532 __skb_queue_purge(&sk->sk_write_queue);
1533 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1534 return;
1536 /* Fall back to message based flow control */
1537 tsk->rcv_win = FLOWCTL_MSG_WIN;
1538 tsk->snd_win = FLOWCTL_MSG_WIN;
1542 * tipc_sk_set_orig_addr - capture sender's address for received message
1543 * @m: descriptor for message info
1544 * @skb: received message buffer
1546 * Note: Address is not captured if not requested by receiver.
1548 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1550 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1551 struct tipc_msg *hdr = buf_msg(skb);
1553 if (!srcaddr)
1554 return;
1556 srcaddr->sock.family = AF_TIPC;
1557 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1558 srcaddr->sock.scope = 0;
1559 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1560 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1561 srcaddr->sock.addr.name.domain = 0;
1562 m->msg_namelen = sizeof(struct sockaddr_tipc);
1564 if (!msg_in_group(hdr))
1565 return;
1567 /* Group message users may also want to know sending member's id */
1568 srcaddr->member.family = AF_TIPC;
1569 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1570 srcaddr->member.scope = 0;
1571 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1572 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1573 srcaddr->member.addr.name.domain = 0;
1574 m->msg_namelen = sizeof(*srcaddr);
1578 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1579 * @m: descriptor for message info
1580 * @skb: received message buffer
1581 * @tsk: TIPC port associated with message
1583 * Note: Ancillary data is not captured if not requested by receiver.
1585 * Returns 0 if successful, otherwise errno
1587 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1588 struct tipc_sock *tsk)
1590 struct tipc_msg *msg;
1591 u32 anc_data[3];
1592 u32 err;
1593 u32 dest_type;
1594 int has_name;
1595 int res;
1597 if (likely(m->msg_controllen == 0))
1598 return 0;
1599 msg = buf_msg(skb);
1601 /* Optionally capture errored message object(s) */
1602 err = msg ? msg_errcode(msg) : 0;
1603 if (unlikely(err)) {
1604 anc_data[0] = err;
1605 anc_data[1] = msg_data_sz(msg);
1606 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1607 if (res)
1608 return res;
1609 if (anc_data[1]) {
1610 if (skb_linearize(skb))
1611 return -ENOMEM;
1612 msg = buf_msg(skb);
1613 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1614 msg_data(msg));
1615 if (res)
1616 return res;
1620 /* Optionally capture message destination object */
1621 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1622 switch (dest_type) {
1623 case TIPC_NAMED_MSG:
1624 has_name = 1;
1625 anc_data[0] = msg_nametype(msg);
1626 anc_data[1] = msg_namelower(msg);
1627 anc_data[2] = msg_namelower(msg);
1628 break;
1629 case TIPC_MCAST_MSG:
1630 has_name = 1;
1631 anc_data[0] = msg_nametype(msg);
1632 anc_data[1] = msg_namelower(msg);
1633 anc_data[2] = msg_nameupper(msg);
1634 break;
1635 case TIPC_CONN_MSG:
1636 has_name = (tsk->conn_type != 0);
1637 anc_data[0] = tsk->conn_type;
1638 anc_data[1] = tsk->conn_instance;
1639 anc_data[2] = tsk->conn_instance;
1640 break;
1641 default:
1642 has_name = 0;
1644 if (has_name) {
1645 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1646 if (res)
1647 return res;
1650 return 0;
1651 }
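/* Editorial note (not part of the original file): the control messages built
 * above, as seen by a user-space recvmsg() caller that supplies msg_control:
 *
 *	TIPC_ERRINFO  - 2 x u32: error code and length of returned data
 *	TIPC_RETDATA  - the returned (rejected) message data itself
 *	TIPC_DESTNAME - 3 x u32: service type, lower and upper instance
 *
 *	char cbuf[CMSG_SPACE(1024)];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	__u32 *dname;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm))
 *		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_DESTNAME)
 *			dname = (__u32 *)CMSG_DATA(cm);
 */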
1653 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1655 struct sock *sk = &tsk->sk;
1656 struct net *net = sock_net(sk);
1657 struct sk_buff *skb = NULL;
1658 struct tipc_msg *msg;
1659 u32 peer_port = tsk_peer_port(tsk);
1660 u32 dnode = tsk_peer_node(tsk);
1662 if (!tipc_sk_connected(sk))
1663 return;
1664 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1665 dnode, tsk_own_node(tsk), peer_port,
1666 tsk->portid, TIPC_OK);
1667 if (!skb)
1668 return;
1669 msg = buf_msg(skb);
1670 msg_set_conn_ack(msg, tsk->rcv_unacked);
1671 tsk->rcv_unacked = 0;
1673 /* Adjust to and advertise the correct window limit */
1674 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1675 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1676 msg_set_adv_win(msg, tsk->rcv_win);
1678 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1681 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1683 struct sock *sk = sock->sk;
1684 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1685 long timeo = *timeop;
1686 int err = sock_error(sk);
1688 if (err)
1689 return err;
1691 for (;;) {
1692 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1693 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1694 err = -ENOTCONN;
1695 break;
1697 add_wait_queue(sk_sleep(sk), &wait);
1698 release_sock(sk);
1699 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1700 sched_annotate_sleep();
1701 lock_sock(sk);
1702 remove_wait_queue(sk_sleep(sk), &wait);
1704 err = 0;
1705 if (!skb_queue_empty(&sk->sk_receive_queue))
1706 break;
1707 err = -EAGAIN;
1708 if (!timeo)
1709 break;
1710 err = sock_intr_errno(timeo);
1711 if (signal_pending(current))
1712 break;
1714 err = sock_error(sk);
1715 if (err)
1716 break;
1718 *timeop = timeo;
1719 return err;
1723 * tipc_recvmsg - receive packet-oriented message
1724 * @m: descriptor for message info
1725 * @buflen: length of user buffer area
1726 * @flags: receive flags
1728 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1729 * If the complete message doesn't fit in user area, truncate it.
1731 * Returns size of returned message data, errno otherwise
1733 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1734 size_t buflen, int flags)
1736 struct sock *sk = sock->sk;
1737 bool connected = !tipc_sk_type_connectionless(sk);
1738 struct tipc_sock *tsk = tipc_sk(sk);
1739 int rc, err, hlen, dlen, copy;
1740 struct sk_buff_head xmitq;
1741 struct tipc_msg *hdr;
1742 struct sk_buff *skb;
1743 bool grp_evt;
1744 long timeout;
1746 /* Catch invalid receive requests */
1747 if (unlikely(!buflen))
1748 return -EINVAL;
1750 lock_sock(sk);
1751 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1752 rc = -ENOTCONN;
1753 goto exit;
1755 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1757 /* Step rcv queue to first msg with data or error; wait if necessary */
1758 do {
1759 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1760 if (unlikely(rc))
1761 goto exit;
1762 skb = skb_peek(&sk->sk_receive_queue);
1763 hdr = buf_msg(skb);
1764 dlen = msg_data_sz(hdr);
1765 hlen = msg_hdr_sz(hdr);
1766 err = msg_errcode(hdr);
1767 grp_evt = msg_is_grp_evt(hdr);
1768 if (likely(dlen || err))
1769 break;
1770 tsk_advance_rx_queue(sk);
1771 } while (1);
1773 /* Collect msg meta data, including error code and rejected data */
1774 tipc_sk_set_orig_addr(m, skb);
1775 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1776 if (unlikely(rc))
1777 goto exit;
1778 hdr = buf_msg(skb);
1780 /* Capture data if non-error msg, otherwise just set return value */
1781 if (likely(!err)) {
1782 copy = min_t(int, dlen, buflen);
1783 if (unlikely(copy != dlen))
1784 m->msg_flags |= MSG_TRUNC;
1785 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1786 } else {
1787 copy = 0;
1788 rc = 0;
1789 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1790 rc = -ECONNRESET;
1792 if (unlikely(rc))
1793 goto exit;
1795 /* Mark message as group event if applicable */
1796 if (unlikely(grp_evt)) {
1797 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1798 m->msg_flags |= MSG_EOR;
1799 m->msg_flags |= MSG_OOB;
1800 copy = 0;
1803 /* Capture of data or error code/rejected data was successful */
1804 if (unlikely(flags & MSG_PEEK))
1805 goto exit;
1807 /* Send group flow control advertisement when applicable */
1808 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1809 skb_queue_head_init(&xmitq);
1810 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1811 msg_orignode(hdr), msg_origport(hdr),
1812 &xmitq);
1813 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1816 tsk_advance_rx_queue(sk);
1818 if (likely(!connected))
1819 goto exit;
1821 /* Send connection flow control advertisement when applicable */
1822 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1823 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1824 tipc_sk_send_ack(tsk);
1825 exit:
1826 release_sock(sk);
1827 return rc ? rc : copy;
1831 * tipc_recvstream - receive stream-oriented data
1832 * @m: descriptor for message info
1833 * @buflen: total size of user buffer area
1834 * @flags: receive flags
1836 * Used for SOCK_STREAM messages only. If not enough data is available,
1837 * it will optionally wait for more; never truncates data.
1839 * Returns size of returned message data, errno otherwise
1841 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1842 size_t buflen, int flags)
1844 struct sock *sk = sock->sk;
1845 struct tipc_sock *tsk = tipc_sk(sk);
1846 struct sk_buff *skb;
1847 struct tipc_msg *hdr;
1848 struct tipc_skb_cb *skb_cb;
1849 bool peek = flags & MSG_PEEK;
1850 int offset, required, copy, copied = 0;
1851 int hlen, dlen, err, rc;
1852 long timeout;
1854 /* Catch invalid receive attempts */
1855 if (unlikely(!buflen))
1856 return -EINVAL;
1858 lock_sock(sk);
1860 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1861 rc = -ENOTCONN;
1862 goto exit;
1864 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1865 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1867 do {
1868 /* Look at first msg in receive queue; wait if necessary */
1869 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1870 if (unlikely(rc))
1871 break;
1872 skb = skb_peek(&sk->sk_receive_queue);
1873 skb_cb = TIPC_SKB_CB(skb);
1874 hdr = buf_msg(skb);
1875 dlen = msg_data_sz(hdr);
1876 hlen = msg_hdr_sz(hdr);
1877 err = msg_errcode(hdr);
1879 /* Discard any empty non-errored (SYN-) message */
1880 if (unlikely(!dlen && !err)) {
1881 tsk_advance_rx_queue(sk);
1882 continue;
1885 /* Collect msg meta data, incl. error code and rejected data */
1886 if (!copied) {
1887 tipc_sk_set_orig_addr(m, skb);
1888 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1889 if (rc)
1890 break;
1891 hdr = buf_msg(skb);
1894 /* Copy data if msg ok, otherwise return error/partial data */
1895 if (likely(!err)) {
1896 offset = skb_cb->bytes_read;
1897 copy = min_t(int, dlen - offset, buflen - copied);
1898 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1899 if (unlikely(rc))
1900 break;
1901 copied += copy;
1902 offset += copy;
1903 if (unlikely(offset < dlen)) {
1904 if (!peek)
1905 skb_cb->bytes_read = offset;
1906 break;
1908 } else {
1909 rc = 0;
1910 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1911 rc = -ECONNRESET;
1912 if (copied || rc)
1913 break;
1916 if (unlikely(peek))
1917 break;
1919 tsk_advance_rx_queue(sk);
1921 /* Send connection flow control advertisement when applicable */
1922 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1923 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1924 tipc_sk_send_ack(tsk);
1926 /* Exit if all requested data or FIN/error received */
1927 if (copied == buflen || err)
1928 break;
1930 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1931 exit:
1932 release_sock(sk);
1933 return copied ? copied : rc;
1937 * tipc_write_space - wake up thread if port congestion is released
1938 * @sk: socket
1940 static void tipc_write_space(struct sock *sk)
1942 struct socket_wq *wq;
1944 rcu_read_lock();
1945 wq = rcu_dereference(sk->sk_wq);
1946 if (skwq_has_sleeper(wq))
1947 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1948 EPOLLWRNORM | EPOLLWRBAND);
1949 rcu_read_unlock();
1953 * tipc_data_ready - wake up threads to indicate messages have been received
1954 * @sk: socket
1957 static void tipc_data_ready(struct sock *sk)
1959 struct socket_wq *wq;
1961 rcu_read_lock();
1962 wq = rcu_dereference(sk->sk_wq);
1963 if (skwq_has_sleeper(wq))
1964 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1965 EPOLLRDNORM | EPOLLRDBAND);
1966 rcu_read_unlock();
1969 static void tipc_sock_destruct(struct sock *sk)
1971 __skb_queue_purge(&sk->sk_receive_queue);
1974 static void tipc_sk_proto_rcv(struct sock *sk,
1975 struct sk_buff_head *inputq,
1976 struct sk_buff_head *xmitq)
1978 struct sk_buff *skb = __skb_dequeue(inputq);
1979 struct tipc_sock *tsk = tipc_sk(sk);
1980 struct tipc_msg *hdr = buf_msg(skb);
1981 struct tipc_group *grp = tsk->group;
1982 bool wakeup = false;
1984 switch (msg_user(hdr)) {
1985 case CONN_MANAGER:
1986 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1987 return;
1988 case SOCK_WAKEUP:
1989 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1990 /* coupled with smp_rmb() in tipc_wait_for_cond() */
1991 smp_wmb();
1992 tsk->cong_link_cnt--;
1993 wakeup = true;
1994 break;
1995 case GROUP_PROTOCOL:
1996 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1997 break;
1998 case TOP_SRV:
1999 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2000 hdr, inputq, xmitq);
2001 break;
2002 default:
2003 break;
2006 if (wakeup)
2007 sk->sk_write_space(sk);
2009 kfree_skb(skb);
2013 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2014 * @tsk: TIPC socket
2015 * @skb: pointer to message buffer.
2016 * Returns true if message should be added to receive queue, false otherwise
2018 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2020 struct sock *sk = &tsk->sk;
2021 struct net *net = sock_net(sk);
2022 struct tipc_msg *hdr = buf_msg(skb);
2023 bool con_msg = msg_connected(hdr);
2024 u32 pport = tsk_peer_port(tsk);
2025 u32 pnode = tsk_peer_node(tsk);
2026 u32 oport = msg_origport(hdr);
2027 u32 onode = msg_orignode(hdr);
2028 int err = msg_errcode(hdr);
2029 unsigned long delay;
2031 if (unlikely(msg_mcast(hdr)))
2032 return false;
2034 switch (sk->sk_state) {
2035 case TIPC_CONNECTING:
2036 /* Setup ACK */
2037 if (likely(con_msg)) {
2038 if (err)
2039 break;
2040 tipc_sk_finish_conn(tsk, oport, onode);
2041 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2042 /* ACK+ message with data is added to receive queue */
2043 if (msg_data_sz(hdr))
2044 return true;
2045 /* Empty ACK-: wake up sleeping connect() and drop */
2046 sk->sk_state_change(sk);
2047 msg_set_dest_droppable(hdr, 1);
2048 return false;
2050 /* Ignore connectionless message if not from listening socket */
2051 if (oport != pport || onode != pnode)
2052 return false;
2054 /* Rejected SYN */
2055 if (err != TIPC_ERR_OVERLOAD)
2056 break;
2058 /* Prepare for new setup attempt if we have a SYN clone */
2059 if (skb_queue_empty(&sk->sk_write_queue))
2060 break;
2061 get_random_bytes(&delay, 2);
2062 delay %= (tsk->conn_timeout / 4);
2063 delay = msecs_to_jiffies(delay + 100);
2064 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2065 return false;
2066 case TIPC_OPEN:
2067 case TIPC_DISCONNECTING:
2068 return false;
2069 case TIPC_LISTEN:
2070 /* Accept only SYN message */
2071 if (!msg_is_syn(hdr) &&
2072 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2073 return false;
2074 if (!con_msg && !err)
2075 return true;
2076 return false;
2077 case TIPC_ESTABLISHED:
2078 /* Accept only connection-based messages sent by peer */
2079 if (likely(con_msg && !err && pport == oport && pnode == onode))
2080 return true;
2081 if (!tsk_peer_msg(tsk, hdr))
2082 return false;
2083 if (!err)
2084 return true;
2085 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2086 tipc_node_remove_conn(net, pnode, tsk->portid);
2087 sk->sk_state_change(sk);
2088 return true;
2089 default:
2090 pr_err("Unknown sk_state %u\n", sk->sk_state);
2092 /* Abort connection setup attempt */
2093 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2094 sk->sk_err = ECONNREFUSED;
2095 sk->sk_state_change(sk);
2096 return true;
2100 * rcvbuf_limit - get proper overload limit of socket receive queue
2101 * @sk: socket
2102 * @skb: message
2104 * For connection-oriented messages, irrespective of importance,
2105 * default queue limit is 2 MB.
2107 * For connectionless messages, queue limits are based on message
2108 * importance as follows:
2110 * TIPC_LOW_IMPORTANCE (2 MB)
2111 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2112 * TIPC_HIGH_IMPORTANCE (8 MB)
2113 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2115 * Returns overload limit according to corresponding message importance
2117 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2119 struct tipc_sock *tsk = tipc_sk(sk);
2120 struct tipc_msg *hdr = buf_msg(skb);
2122 if (unlikely(msg_in_group(hdr)))
2123 return sk->sk_rcvbuf;
2125 if (unlikely(!msg_connected(hdr)))
2126 return sk->sk_rcvbuf << msg_importance(hdr);
2128 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2129 return sk->sk_rcvbuf;
2131 return FLOWCTL_MSG_LIM;
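/*
 * Worked example (added): with the documented 2 MB base limit in
 * sk->sk_rcvbuf, the shift by message importance yields exactly the
 * connectionless limits listed above, since the importance levels are 0..3:
 *
 *	limit = sk->sk_rcvbuf << msg_importance(hdr);
 *	// TIPC_LOW_IMPORTANCE      (0): 2 MB << 0 =  2 MB
 *	// TIPC_MEDIUM_IMPORTANCE   (1): 2 MB << 1 =  4 MB
 *	// TIPC_HIGH_IMPORTANCE     (2): 2 MB << 2 =  8 MB
 *	// TIPC_CRITICAL_IMPORTANCE (3): 2 MB << 3 = 16 MB
 */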
2135 * tipc_sk_filter_rcv - validate incoming message
2136 * @sk: socket
2137 * @skb: pointer to message.
2139 * Enqueues message on receive queue if acceptable; optionally handles
2140 * disconnect indication for a connected socket.
2142 * Called with socket lock already taken
2145 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2146 struct sk_buff_head *xmitq)
2148 bool sk_conn = !tipc_sk_type_connectionless(sk);
2149 struct tipc_sock *tsk = tipc_sk(sk);
2150 struct tipc_group *grp = tsk->group;
2151 struct tipc_msg *hdr = buf_msg(skb);
2152 struct net *net = sock_net(sk);
2153 struct sk_buff_head inputq;
2154 int mtyp = msg_type(hdr);
2155 int limit, err = TIPC_OK;
2157 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2158 TIPC_SKB_CB(skb)->bytes_read = 0;
2159 __skb_queue_head_init(&inputq);
2160 __skb_queue_tail(&inputq, skb);
2162 if (unlikely(!msg_isdata(hdr)))
2163 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2165 if (unlikely(grp))
2166 tipc_group_filter_msg(grp, &inputq, xmitq);
2168 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2169 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2171 /* Validate and add to receive buffer if there is space */
2172 while ((skb = __skb_dequeue(&inputq))) {
2173 hdr = buf_msg(skb);
2174 limit = rcvbuf_limit(sk, skb);
2175 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2176 (!sk_conn && msg_connected(hdr)) ||
2177 (!grp && msg_in_group(hdr)))
2178 err = TIPC_ERR_NO_PORT;
2179 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2180 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2181 "err_overload2!");
2182 atomic_inc(&sk->sk_drops);
2183 err = TIPC_ERR_OVERLOAD;
2186 if (unlikely(err)) {
2187 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2188 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2189 "@filter_rcv!");
2190 __skb_queue_tail(xmitq, skb);
2192 err = TIPC_OK;
2193 continue;
2195 __skb_queue_tail(&sk->sk_receive_queue, skb);
2196 skb_set_owner_r(skb, sk);
2197 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2198 "rcvq >90% allocated!");
2199 sk->sk_data_ready(sk);
2204 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2205 * @sk: socket
2206 * @skb: message
2208 * Caller must hold socket lock
2210 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2212 unsigned int before = sk_rmem_alloc_get(sk);
2213 struct sk_buff_head xmitq;
2214 unsigned int added;
2216 __skb_queue_head_init(&xmitq);
2218 tipc_sk_filter_rcv(sk, skb, &xmitq);
2219 added = sk_rmem_alloc_get(sk) - before;
2220 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2222 /* Send pending response/rejected messages, if any */
2223 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2224 return 0;
2228 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2229 * inputq and try adding them to socket or backlog queue
2230 * @inputq: list of incoming buffers with potentially different destinations
2231 * @sk: socket where the buffers should be enqueued
2232 * @dport: port number for the socket
2234 * Caller must hold socket lock
2236 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2237 u32 dport, struct sk_buff_head *xmitq)
2239 unsigned long time_limit = jiffies + 2;
2240 struct sk_buff *skb;
2241 unsigned int lim;
2242 atomic_t *dcnt;
2243 u32 onode;
2245 while (skb_queue_len(inputq)) {
2246 if (unlikely(time_after_eq(jiffies, time_limit)))
2247 return;
2249 skb = tipc_skb_dequeue(inputq, dport);
2250 if (unlikely(!skb))
2251 return;
2253 /* Add message directly to receive queue if possible */
2254 if (!sock_owned_by_user(sk)) {
2255 tipc_sk_filter_rcv(sk, skb, xmitq);
2256 continue;
2259 /* Try backlog, compensating for double-counted bytes */
2260 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2261 if (!sk->sk_backlog.len)
2262 atomic_set(dcnt, 0);
2263 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2264 if (likely(!sk_add_backlog(sk, skb, lim))) {
2265 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2266 "bklg & rcvq >90% allocated!");
2267 continue;
2270 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2271 /* Overload => reject message back to sender */
2272 onode = tipc_own_addr(sock_net(sk));
2273 atomic_inc(&sk->sk_drops);
2274 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2275 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2276 "@sk_enqueue!");
2277 __skb_queue_tail(xmitq, skb);
2279 break;
2284 * tipc_sk_rcv - handle a chain of incoming buffers
2285 * @inputq: buffer list containing the buffers
2286 * Consumes all buffers in list until inputq is empty
2287 * Note: may be called in multiple threads referring to the same queue
2289 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2291 struct sk_buff_head xmitq;
2292 u32 dnode, dport = 0;
2293 int err;
2294 struct tipc_sock *tsk;
2295 struct sock *sk;
2296 struct sk_buff *skb;
2298 __skb_queue_head_init(&xmitq);
2299 while (skb_queue_len(inputq)) {
2300 dport = tipc_skb_peek_port(inputq, dport);
2301 tsk = tipc_sk_lookup(net, dport);
2303 if (likely(tsk)) {
2304 sk = &tsk->sk;
2305 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2306 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2307 spin_unlock_bh(&sk->sk_lock.slock);
2309 /* Send pending response/rejected messages, if any */
2310 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2311 sock_put(sk);
2312 continue;
2314 /* No destination socket => dequeue skb if still there */
2315 skb = tipc_skb_dequeue(inputq, dport);
2316 if (!skb)
2317 return;
2319 /* Try secondary lookup if unresolved named message */
2320 err = TIPC_ERR_NO_PORT;
2321 if (tipc_msg_lookup_dest(net, skb, &err))
2322 goto xmit;
2324 /* Prepare for message rejection */
2325 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2326 continue;
2328 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2329 xmit:
2330 dnode = msg_destnode(buf_msg(skb));
2331 tipc_node_xmit_skb(net, skb, dnode, dport);
2335 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2337 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2338 struct sock *sk = sock->sk;
2339 int done;
2341 do {
2342 int err = sock_error(sk);
2343 if (err)
2344 return err;
2345 if (!*timeo_p)
2346 return -ETIMEDOUT;
2347 if (signal_pending(current))
2348 return sock_intr_errno(*timeo_p);
2350 add_wait_queue(sk_sleep(sk), &wait);
2351 done = sk_wait_event(sk, timeo_p,
2352 sk->sk_state != TIPC_CONNECTING, &wait);
2353 remove_wait_queue(sk_sleep(sk), &wait);
2354 } while (!done);
2355 return 0;
2358 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2360 if (addr->family != AF_TIPC)
2361 return false;
2362 if (addr->addrtype == TIPC_SERVICE_RANGE)
2363 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2364 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2365 addr->addrtype == TIPC_SOCKET_ADDR);
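/*
 * Illustrative sketch (added): the three sockaddr_tipc layouts accepted by
 * tipc_sockaddr_is_sane(), filled in as a connect()/sendmsg() caller would.
 * The type 4711, instance/range values and node number are placeholders.
 *
 *	#include <linux/tipc.h>
 *
 *	struct sockaddr_tipc a = { .family = AF_TIPC };
 *
 *	// 1) Service address: type + instance
 *	a.addrtype = TIPC_SERVICE_ADDR;
 *	a.addr.name.name.type = 4711;
 *	a.addr.name.name.instance = 1;
 *
 *	// 2) Socket address: port reference + node
 *	a.addrtype = TIPC_SOCKET_ADDR;
 *	a.addr.id.ref = 12345;
 *	a.addr.id.node = 0x1001002;
 *
 *	// 3) Service range: lower must not exceed upper
 *	a.addrtype = TIPC_SERVICE_RANGE;
 *	a.addr.nameseq.type = 4711;
 *	a.addr.nameseq.lower = 0;
 *	a.addr.nameseq.upper = 99;
 */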
2369 * tipc_connect - establish a connection to another TIPC port
2370 * @sock: socket structure
2371 * @dest: socket address for destination port
2372 * @destlen: size of socket address data structure
2373 * @flags: file-related flags associated with socket
2375 * Returns 0 on success, errno otherwise
2377 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2378 int destlen, int flags)
2380 struct sock *sk = sock->sk;
2381 struct tipc_sock *tsk = tipc_sk(sk);
2382 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2383 struct msghdr m = {NULL,};
2384 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2385 int previous;
2386 int res = 0;
2388 if (destlen != sizeof(struct sockaddr_tipc))
2389 return -EINVAL;
2391 lock_sock(sk);
2393 if (tsk->group) {
2394 res = -EINVAL;
2395 goto exit;
2398 if (dst->family == AF_UNSPEC) {
2399 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2400 if (!tipc_sk_type_connectionless(sk))
2401 res = -EINVAL;
2402 goto exit;
2404 if (!tipc_sockaddr_is_sane(dst)) {
2405 res = -EINVAL;
2406 goto exit;
2408 /* DGRAM/RDM connect(), just save the destaddr */
2409 if (tipc_sk_type_connectionless(sk)) {
2410 memcpy(&tsk->peer, dest, destlen);
2411 goto exit;
2412 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2413 res = -EINVAL;
2414 goto exit;
2417 previous = sk->sk_state;
2419 switch (sk->sk_state) {
2420 case TIPC_OPEN:
2421 /* Send a 'SYN-' to destination */
2422 m.msg_name = dest;
2423 m.msg_namelen = destlen;
2425 /* If this is a non-blocking connect(), set MSG_DONTWAIT so
2426 * that __tipc_sendmsg() never blocks.
2428 if (!timeout)
2429 m.msg_flags = MSG_DONTWAIT;
2431 res = __tipc_sendmsg(sock, &m, 0);
2432 if ((res < 0) && (res != -EWOULDBLOCK))
2433 goto exit;
2435 /* Just entered TIPC_CONNECTING state; the only
2436 * difference is that the return value in the non-blocking
2437 * case is -EINPROGRESS rather than -EALREADY.
2439 res = -EINPROGRESS;
2440 /* fall through */
2441 case TIPC_CONNECTING:
2442 if (!timeout) {
2443 if (previous == TIPC_CONNECTING)
2444 res = -EALREADY;
2445 goto exit;
2447 timeout = msecs_to_jiffies(timeout);
2448 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2449 res = tipc_wait_for_connect(sock, &timeout);
2450 break;
2451 case TIPC_ESTABLISHED:
2452 res = -EISCONN;
2453 break;
2454 default:
2455 res = -EINVAL;
2458 exit:
2459 release_sock(sk);
2460 return res;
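/*
 * Illustrative user-space sketch (added): a non-blocking connect() to a TIPC
 * service address. As described above, the non-blocking caller gets
 * EINPROGRESS and can then poll for writability. SERVER_TYPE and SERVER_INST
 * are placeholder values; error handling is omitted.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int fd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc dst;
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.family = AF_TIPC;
 *	dst.addrtype = TIPC_SERVICE_ADDR;
 *	dst.addr.name.name.type = SERVER_TYPE;
 *	dst.addr.name.name.instance = SERVER_INST;
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *		poll(&pfd, 1, 8000);	// wait up to the 8 s default timeout
 *	}
 */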
2464 * tipc_listen - allow socket to listen for incoming connections
2465 * @sock: socket structure
2466 * @len: (unused)
2468 * Returns 0 on success, errno otherwise
2470 static int tipc_listen(struct socket *sock, int len)
2472 struct sock *sk = sock->sk;
2473 int res;
2475 lock_sock(sk);
2476 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2477 release_sock(sk);
2479 return res;
2482 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2484 struct sock *sk = sock->sk;
2485 DEFINE_WAIT(wait);
2486 int err;
2488 /* True wake-one mechanism for incoming connections: only
2489 * one process gets woken up, not the 'whole herd'.
2490 * Since we do not 'race & poll' for established sockets
2491 * anymore, the common case will execute the loop only once.
2493 for (;;) {
2494 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2495 TASK_INTERRUPTIBLE);
2496 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2497 release_sock(sk);
2498 timeo = schedule_timeout(timeo);
2499 lock_sock(sk);
2501 err = 0;
2502 if (!skb_queue_empty(&sk->sk_receive_queue))
2503 break;
2504 err = -EAGAIN;
2505 if (!timeo)
2506 break;
2507 err = sock_intr_errno(timeo);
2508 if (signal_pending(current))
2509 break;
2511 finish_wait(sk_sleep(sk), &wait);
2512 return err;
2516 * tipc_accept - wait for connection request
2517 * @sock: listening socket
2518 * @newsock: new socket that is to be connected
2519 * @flags: file-related flags associated with socket
2521 * Returns 0 on success, errno otherwise
2523 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2524 bool kern)
2526 struct sock *new_sk, *sk = sock->sk;
2527 struct sk_buff *buf;
2528 struct tipc_sock *new_tsock;
2529 struct tipc_msg *msg;
2530 long timeo;
2531 int res;
2533 lock_sock(sk);
2535 if (sk->sk_state != TIPC_LISTEN) {
2536 res = -EINVAL;
2537 goto exit;
2539 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2540 res = tipc_wait_for_accept(sock, timeo);
2541 if (res)
2542 goto exit;
2544 buf = skb_peek(&sk->sk_receive_queue);
2546 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2547 if (res)
2548 goto exit;
2549 security_sk_clone(sock->sk, new_sock->sk);
2551 new_sk = new_sock->sk;
2552 new_tsock = tipc_sk(new_sk);
2553 msg = buf_msg(buf);
2555 /* we lock on new_sk; but lockdep sees the lock on sk */
2556 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2559 * Reject any stray messages received by new socket
2560 * before the socket lock was taken (very, very unlikely)
2562 tsk_rej_rx_queue(new_sk);
2564 /* Connect new socket to its peer */
2565 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2567 tsk_set_importance(new_tsock, msg_importance(msg));
2568 if (msg_named(msg)) {
2569 new_tsock->conn_type = msg_nametype(msg);
2570 new_tsock->conn_instance = msg_nameinst(msg);
2574 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2575 * Respond to 'SYN+' by queuing it on new socket.
2577 if (!msg_data_sz(msg)) {
2578 struct msghdr m = {NULL,};
2580 tsk_advance_rx_queue(sk);
2581 __tipc_sendstream(new_sock, &m, 0);
2582 } else {
2583 __skb_dequeue(&sk->sk_receive_queue);
2584 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2585 skb_set_owner_r(buf, new_sk);
2587 release_sock(new_sk);
2588 exit:
2589 release_sock(sk);
2590 return res;
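/*
 * Illustrative user-space sketch (added): the listening side served by
 * tipc_listen()/tipc_accept() above. The socket is bound to a made-up
 * service type 4711 before listen(); error handling is omitted.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int srv = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc bindaddr;
 *
 *	memset(&bindaddr, 0, sizeof(bindaddr));
 *	bindaddr.family = AF_TIPC;
 *	bindaddr.addrtype = TIPC_SERVICE_RANGE;
 *	bindaddr.scope = TIPC_CLUSTER_SCOPE;
 *	bindaddr.addr.nameseq.type = 4711;
 *	bindaddr.addr.nameseq.lower = 0;
 *	bindaddr.addr.nameseq.upper = 0;
 *
 *	bind(srv, (struct sockaddr *)&bindaddr, sizeof(bindaddr));
 *	listen(srv, 5);
 *	int peer = accept(srv, NULL, NULL);	// blocks in tipc_wait_for_accept()
 */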
2594 * tipc_shutdown - shutdown socket connection
2595 * @sock: socket structure
2596 * @how: direction to close (must be SHUT_RDWR)
2598 * Terminates connection (if necessary), then purges socket's receive queue.
2600 * Returns 0 on success, errno otherwise
2602 static int tipc_shutdown(struct socket *sock, int how)
2604 struct sock *sk = sock->sk;
2605 int res;
2607 if (how != SHUT_RDWR)
2608 return -EINVAL;
2610 lock_sock(sk);
2612 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2613 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2614 sk->sk_shutdown = SEND_SHUTDOWN;
2616 if (sk->sk_state == TIPC_DISCONNECTING) {
2617 /* Discard any unreceived messages */
2618 __skb_queue_purge(&sk->sk_receive_queue);
2620 /* Wake up anyone sleeping in poll */
2621 sk->sk_state_change(sk);
2622 res = 0;
2623 } else {
2624 res = -ENOTCONN;
2627 release_sock(sk);
2628 return res;
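/*
 * Illustrative user-space sketch (added): TIPC only supports shutting down
 * both directions at once, so anything other than SHUT_RDWR fails with
 * EINVAL, as enforced above.
 *
 *	#include <sys/socket.h>
 *
 *	shutdown(fd, SHUT_RDWR);	// graceful connection termination
 *	// shutdown(fd, SHUT_RD) or SHUT_WR would return -1 with errno EINVAL
 */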
2631 static void tipc_sk_check_probing_state(struct sock *sk,
2632 struct sk_buff_head *list)
2634 struct tipc_sock *tsk = tipc_sk(sk);
2635 u32 pnode = tsk_peer_node(tsk);
2636 u32 pport = tsk_peer_port(tsk);
2637 u32 self = tsk_own_node(tsk);
2638 u32 oport = tsk->portid;
2639 struct sk_buff *skb;
2641 if (tsk->probe_unacked) {
2642 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2643 sk->sk_err = ECONNABORTED;
2644 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2645 sk->sk_state_change(sk);
2646 return;
2648 /* Prepare new probe */
2649 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2650 pnode, self, pport, oport, TIPC_OK);
2651 if (skb)
2652 __skb_queue_tail(list, skb);
2653 tsk->probe_unacked = true;
2654 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2657 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2659 struct tipc_sock *tsk = tipc_sk(sk);
2661 /* Try again later if dest link is congested */
2662 if (tsk->cong_link_cnt) {
2663 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2664 return;
2666 /* Prepare SYN for retransmit */
2667 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2670 static void tipc_sk_timeout(struct timer_list *t)
2672 struct sock *sk = from_timer(sk, t, sk_timer);
2673 struct tipc_sock *tsk = tipc_sk(sk);
2674 u32 pnode = tsk_peer_node(tsk);
2675 struct sk_buff_head list;
2676 int rc = 0;
2678 skb_queue_head_init(&list);
2679 bh_lock_sock(sk);
2681 /* Try again later if socket is busy */
2682 if (sock_owned_by_user(sk)) {
2683 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2684 bh_unlock_sock(sk);
2685 return;
2688 if (sk->sk_state == TIPC_ESTABLISHED)
2689 tipc_sk_check_probing_state(sk, &list);
2690 else if (sk->sk_state == TIPC_CONNECTING)
2691 tipc_sk_retry_connect(sk, &list);
2693 bh_unlock_sock(sk);
2695 if (!skb_queue_empty(&list))
2696 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2698 /* SYN messages may cause link congestion */
2699 if (rc == -ELINKCONG) {
2700 tipc_dest_push(&tsk->cong_links, pnode, 0);
2701 tsk->cong_link_cnt = 1;
2703 sock_put(sk);
2706 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2707 struct tipc_name_seq const *seq)
2709 struct sock *sk = &tsk->sk;
2710 struct net *net = sock_net(sk);
2711 struct publication *publ;
2712 u32 key;
2714 if (scope != TIPC_NODE_SCOPE)
2715 scope = TIPC_CLUSTER_SCOPE;
2717 if (tipc_sk_connected(sk))
2718 return -EINVAL;
2719 key = tsk->portid + tsk->pub_count + 1;
2720 if (key == tsk->portid)
2721 return -EADDRINUSE;
2723 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2724 scope, tsk->portid, key);
2725 if (unlikely(!publ))
2726 return -EINVAL;
2728 list_add(&publ->binding_sock, &tsk->publications);
2729 tsk->pub_count++;
2730 tsk->published = 1;
2731 return 0;
2734 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2735 struct tipc_name_seq const *seq)
2737 struct net *net = sock_net(&tsk->sk);
2738 struct publication *publ;
2739 struct publication *safe;
2740 int rc = -EINVAL;
2742 if (scope != TIPC_NODE_SCOPE)
2743 scope = TIPC_CLUSTER_SCOPE;
2745 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2746 if (seq) {
2747 if (publ->scope != scope)
2748 continue;
2749 if (publ->type != seq->type)
2750 continue;
2751 if (publ->lower != seq->lower)
2752 continue;
2753 if (publ->upper != seq->upper)
2754 break;
2755 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2756 publ->upper, publ->key);
2757 rc = 0;
2758 break;
2760 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2761 publ->upper, publ->key);
2762 rc = 0;
2764 if (list_empty(&tsk->publications))
2765 tsk->published = 0;
2766 return rc;
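/*
 * Illustrative user-space sketch (added, under the assumption that
 * tipc_bind() - not shown in this excerpt - maps a positive scope to
 * tipc_sk_publish() and a negative scope to tipc_sk_withdraw()): publishing
 * and then withdrawing a service range. Type 4711 is a placeholder.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	struct sockaddr_tipc name;
 *
 *	memset(&name, 0, sizeof(name));
 *	name.family = AF_TIPC;
 *	name.addrtype = TIPC_SERVICE_RANGE;
 *	name.addr.nameseq.type = 4711;
 *	name.addr.nameseq.lower = 100;
 *	name.addr.nameseq.upper = 199;
 *
 *	name.scope = TIPC_CLUSTER_SCOPE;	// publish
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));
 *
 *	name.scope = -TIPC_CLUSTER_SCOPE;	// withdraw the same binding
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));
 */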
2769 /* tipc_sk_reinit: set non-zero address in all existing sockets
2770 * when we go from standalone to network mode.
2772 void tipc_sk_reinit(struct net *net)
2774 struct tipc_net *tn = net_generic(net, tipc_net_id);
2775 struct rhashtable_iter iter;
2776 struct tipc_sock *tsk;
2777 struct tipc_msg *msg;
2779 rhashtable_walk_enter(&tn->sk_rht, &iter);
2781 do {
2782 rhashtable_walk_start(&iter);
2784 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2785 sock_hold(&tsk->sk);
2786 rhashtable_walk_stop(&iter);
2787 lock_sock(&tsk->sk);
2788 msg = &tsk->phdr;
2789 msg_set_prevnode(msg, tipc_own_addr(net));
2790 msg_set_orignode(msg, tipc_own_addr(net));
2791 release_sock(&tsk->sk);
2792 rhashtable_walk_start(&iter);
2793 sock_put(&tsk->sk);
2796 rhashtable_walk_stop(&iter);
2797 } while (tsk == ERR_PTR(-EAGAIN));
2799 rhashtable_walk_exit(&iter);
2802 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2804 struct tipc_net *tn = net_generic(net, tipc_net_id);
2805 struct tipc_sock *tsk;
2807 rcu_read_lock();
2808 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2809 if (tsk)
2810 sock_hold(&tsk->sk);
2811 rcu_read_unlock();
2813 return tsk;
2816 static int tipc_sk_insert(struct tipc_sock *tsk)
2818 struct sock *sk = &tsk->sk;
2819 struct net *net = sock_net(sk);
2820 struct tipc_net *tn = net_generic(net, tipc_net_id);
2821 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2822 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2824 while (remaining--) {
2825 portid++;
2826 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2827 portid = TIPC_MIN_PORT;
2828 tsk->portid = portid;
2829 sock_hold(&tsk->sk);
2830 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2831 tsk_rht_params))
2832 return 0;
2833 sock_put(&tsk->sk);
2836 return -1;
2839 static void tipc_sk_remove(struct tipc_sock *tsk)
2841 struct sock *sk = &tsk->sk;
2842 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2844 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2845 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2846 __sock_put(sk);
2850 static const struct rhashtable_params tsk_rht_params = {
2851 .nelem_hint = 192,
2852 .head_offset = offsetof(struct tipc_sock, node),
2853 .key_offset = offsetof(struct tipc_sock, portid),
2854 .key_len = sizeof(u32), /* portid */
2855 .max_size = 1048576,
2856 .min_size = 256,
2857 .automatic_shrinking = true,
2860 int tipc_sk_rht_init(struct net *net)
2862 struct tipc_net *tn = net_generic(net, tipc_net_id);
2864 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2867 void tipc_sk_rht_destroy(struct net *net)
2869 struct tipc_net *tn = net_generic(net, tipc_net_id);
2871 /* Wait for socket readers to complete */
2872 synchronize_net();
2874 rhashtable_destroy(&tn->sk_rht);
2877 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2879 struct net *net = sock_net(&tsk->sk);
2880 struct tipc_group *grp = tsk->group;
2881 struct tipc_msg *hdr = &tsk->phdr;
2882 struct tipc_name_seq seq;
2883 int rc;
2885 if (mreq->type < TIPC_RESERVED_TYPES)
2886 return -EACCES;
2887 if (mreq->scope > TIPC_NODE_SCOPE)
2888 return -EINVAL;
2889 if (grp)
2890 return -EACCES;
2891 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2892 if (!grp)
2893 return -ENOMEM;
2894 tsk->group = grp;
2895 msg_set_lookup_scope(hdr, mreq->scope);
2896 msg_set_nametype(hdr, mreq->type);
2897 msg_set_dest_droppable(hdr, true);
2898 seq.type = mreq->type;
2899 seq.lower = mreq->instance;
2900 seq.upper = seq.lower;
2901 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2902 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2903 if (rc) {
2904 tipc_group_delete(net, grp);
2905 tsk->group = NULL;
2906 return rc;
2908 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2909 tsk->mc_method.rcast = true;
2910 tsk->mc_method.mandatory = true;
2911 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2912 return rc;
2915 static int tipc_sk_leave(struct tipc_sock *tsk)
2917 struct net *net = sock_net(&tsk->sk);
2918 struct tipc_group *grp = tsk->group;
2919 struct tipc_name_seq seq;
2920 int scope;
2922 if (!grp)
2923 return -EINVAL;
2924 tipc_group_self(grp, &seq, &scope);
2925 tipc_group_delete(net, grp);
2926 tsk->group = NULL;
2927 tipc_sk_withdraw(tsk, scope, &seq);
2928 return 0;
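/*
 * Illustrative user-space sketch (added): joining and leaving a communication
 * group via the TIPC_GROUP_JOIN/TIPC_GROUP_LEAVE options handled by
 * tipc_sk_join()/tipc_sk_leave(). Group type 4711 and instance 17 are
 * placeholders; note that TIPC_GROUP_LEAVE takes no option value.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	struct tipc_group_req req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.type = 4711;		// must be >= TIPC_RESERVED_TYPES
 *	req.instance = 17;
 *	req.scope = TIPC_CLUSTER_SCOPE;
 *	req.flags = TIPC_GROUP_LOOPBACK;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *	// ... exchange group messages ...
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 */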
2932 * tipc_setsockopt - set socket option
2933 * @sock: socket structure
2934 * @lvl: option level
2935 * @opt: option identifier
2936 * @ov: pointer to new option value
2937 * @ol: length of option value
2939 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2940 * (to ease compatibility).
2942 * Returns 0 on success, errno otherwise
2944 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2945 char __user *ov, unsigned int ol)
2947 struct sock *sk = sock->sk;
2948 struct tipc_sock *tsk = tipc_sk(sk);
2949 struct tipc_group_req mreq;
2950 u32 value = 0;
2951 int res = 0;
2953 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2954 return 0;
2955 if (lvl != SOL_TIPC)
2956 return -ENOPROTOOPT;
2958 switch (opt) {
2959 case TIPC_IMPORTANCE:
2960 case TIPC_SRC_DROPPABLE:
2961 case TIPC_DEST_DROPPABLE:
2962 case TIPC_CONN_TIMEOUT:
2963 if (ol < sizeof(value))
2964 return -EINVAL;
2965 if (get_user(value, (u32 __user *)ov))
2966 return -EFAULT;
2967 break;
2968 case TIPC_GROUP_JOIN:
2969 if (ol < sizeof(mreq))
2970 return -EINVAL;
2971 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2972 return -EFAULT;
2973 break;
2974 default:
2975 if (ov || ol)
2976 return -EINVAL;
2979 lock_sock(sk);
2981 switch (opt) {
2982 case TIPC_IMPORTANCE:
2983 res = tsk_set_importance(tsk, value);
2984 break;
2985 case TIPC_SRC_DROPPABLE:
2986 if (sock->type != SOCK_STREAM)
2987 tsk_set_unreliable(tsk, value);
2988 else
2989 res = -ENOPROTOOPT;
2990 break;
2991 case TIPC_DEST_DROPPABLE:
2992 tsk_set_unreturnable(tsk, value);
2993 break;
2994 case TIPC_CONN_TIMEOUT:
2995 tipc_sk(sk)->conn_timeout = value;
2996 break;
2997 case TIPC_MCAST_BROADCAST:
2998 tsk->mc_method.rcast = false;
2999 tsk->mc_method.mandatory = true;
3000 break;
3001 case TIPC_MCAST_REPLICAST:
3002 tsk->mc_method.rcast = true;
3003 tsk->mc_method.mandatory = true;
3004 break;
3005 case TIPC_GROUP_JOIN:
3006 res = tipc_sk_join(tsk, &mreq);
3007 break;
3008 case TIPC_GROUP_LEAVE:
3009 res = tipc_sk_leave(tsk);
3010 break;
3011 default:
3012 res = -EINVAL;
3015 release_sock(sk);
3017 return res;
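/*
 * Illustrative user-space sketch (added): the plain-valued options handled
 * above each take a single 32-bit value at SOL_TIPC level.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	__u32 timeout_ms = 5000;		// connect timeout in ms
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout_ms,
 *		   sizeof(timeout_ms));
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */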
3021 * tipc_getsockopt - get socket option
3022 * @sock: socket structure
3023 * @lvl: option level
3024 * @opt: option identifier
3025 * @ov: receptacle for option value
3026 * @ol: receptacle for length of option value
3028 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3029 * (to ease compatibility).
3031 * Returns 0 on success, errno otherwise
3033 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3034 char __user *ov, int __user *ol)
3036 struct sock *sk = sock->sk;
3037 struct tipc_sock *tsk = tipc_sk(sk);
3038 struct tipc_name_seq seq;
3039 int len, scope;
3040 u32 value;
3041 int res;
3043 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3044 return put_user(0, ol);
3045 if (lvl != SOL_TIPC)
3046 return -ENOPROTOOPT;
3047 res = get_user(len, ol);
3048 if (res)
3049 return res;
3051 lock_sock(sk);
3053 switch (opt) {
3054 case TIPC_IMPORTANCE:
3055 value = tsk_importance(tsk);
3056 break;
3057 case TIPC_SRC_DROPPABLE:
3058 value = tsk_unreliable(tsk);
3059 break;
3060 case TIPC_DEST_DROPPABLE:
3061 value = tsk_unreturnable(tsk);
3062 break;
3063 case TIPC_CONN_TIMEOUT:
3064 value = tsk->conn_timeout;
3065 /* no need to set "res", since already 0 at this point */
3066 break;
3067 case TIPC_NODE_RECVQ_DEPTH:
3068 value = 0; /* was tipc_queue_size, now obsolete */
3069 break;
3070 case TIPC_SOCK_RECVQ_DEPTH:
3071 value = skb_queue_len(&sk->sk_receive_queue);
3072 break;
3073 case TIPC_SOCK_RECVQ_USED:
3074 value = sk_rmem_alloc_get(sk);
3075 break;
3076 case TIPC_GROUP_JOIN:
3077 seq.type = 0;
3078 if (tsk->group)
3079 tipc_group_self(tsk->group, &seq, &scope);
3080 value = seq.type;
3081 break;
3082 default:
3083 res = -EINVAL;
3086 release_sock(sk);
3088 if (res)
3089 return res; /* "get" failed */
3091 if (len < sizeof(value))
3092 return -EINVAL;
3094 if (copy_to_user(ov, &value, sizeof(value)))
3095 return -EFAULT;
3097 return put_user(sizeof(value), ol);
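/*
 * Illustrative user-space sketch (added): reading a value back, e.g. the
 * current receive-queue depth exposed via TIPC_SOCK_RECVQ_DEPTH.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	__u32 depth = 0;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 */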
3100 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3102 struct net *net = sock_net(sock->sk);
3103 struct tipc_sioc_nodeid_req nr = {0};
3104 struct tipc_sioc_ln_req lnr;
3105 void __user *argp = (void __user *)arg;
3107 switch (cmd) {
3108 case SIOCGETLINKNAME:
3109 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3110 return -EFAULT;
3111 if (!tipc_node_get_linkname(net,
3112 lnr.bearer_id & 0xffff, lnr.peer,
3113 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3114 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3115 return -EFAULT;
3116 return 0;
3118 return -EADDRNOTAVAIL;
3119 case SIOCGETNODEID:
3120 if (copy_from_user(&nr, argp, sizeof(nr)))
3121 return -EFAULT;
3122 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3123 return -EADDRNOTAVAIL;
3124 if (copy_to_user(argp, &nr, sizeof(nr)))
3125 return -EFAULT;
3126 return 0;
3127 default:
3128 return -ENOIOCTLCMD;
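/*
 * Illustrative user-space sketch (added, assuming the tipc_sioc_nodeid_req
 * layout from <linux/tipc.h>): querying a peer's node identity with
 * SIOCGETNODEID, the second command handled above. use_node_id() is a
 * hypothetical application helper.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/tipc.h>
 *
 *	struct tipc_sioc_nodeid_req nr;
 *
 *	memset(&nr, 0, sizeof(nr));
 *	nr.peer = peer_node_hash;		// 32-bit node number of the peer
 *	if (ioctl(sd, SIOCGETNODEID, &nr) == 0)
 *		use_node_id(nr.node_id);	// identity bytes of the node
 */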
3132 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3134 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3135 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3136 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3138 tsk1->peer.family = AF_TIPC;
3139 tsk1->peer.addrtype = TIPC_ADDR_ID;
3140 tsk1->peer.scope = TIPC_NODE_SCOPE;
3141 tsk1->peer.addr.id.ref = tsk2->portid;
3142 tsk1->peer.addr.id.node = onode;
3143 tsk2->peer.family = AF_TIPC;
3144 tsk2->peer.addrtype = TIPC_ADDR_ID;
3145 tsk2->peer.scope = TIPC_NODE_SCOPE;
3146 tsk2->peer.addr.id.ref = tsk1->portid;
3147 tsk2->peer.addr.id.node = onode;
3149 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3150 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3151 return 0;
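/*
 * Illustrative user-space sketch (added): socketpair() on AF_TIPC returns two
 * sockets that tipc_socketpair() has already wired up as peers of each other.
 *
 *	#include <sys/socket.h>
 *
 *	int fds[2];
 *
 *	if (socketpair(AF_TIPC, SOCK_STREAM, 0, fds) == 0)
 *		send(fds[0], "ping", 4, 0);	// arrives on fds[1]
 */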
3154 /* Protocol switches for the various types of TIPC sockets */
3156 static const struct proto_ops msg_ops = {
3157 .owner = THIS_MODULE,
3158 .family = AF_TIPC,
3159 .release = tipc_release,
3160 .bind = tipc_bind,
3161 .connect = tipc_connect,
3162 .socketpair = tipc_socketpair,
3163 .accept = sock_no_accept,
3164 .getname = tipc_getname,
3165 .poll = tipc_poll,
3166 .ioctl = tipc_ioctl,
3167 .listen = sock_no_listen,
3168 .shutdown = tipc_shutdown,
3169 .setsockopt = tipc_setsockopt,
3170 .getsockopt = tipc_getsockopt,
3171 .sendmsg = tipc_sendmsg,
3172 .recvmsg = tipc_recvmsg,
3173 .mmap = sock_no_mmap,
3174 .sendpage = sock_no_sendpage
3177 static const struct proto_ops packet_ops = {
3178 .owner = THIS_MODULE,
3179 .family = AF_TIPC,
3180 .release = tipc_release,
3181 .bind = tipc_bind,
3182 .connect = tipc_connect,
3183 .socketpair = tipc_socketpair,
3184 .accept = tipc_accept,
3185 .getname = tipc_getname,
3186 .poll = tipc_poll,
3187 .ioctl = tipc_ioctl,
3188 .listen = tipc_listen,
3189 .shutdown = tipc_shutdown,
3190 .setsockopt = tipc_setsockopt,
3191 .getsockopt = tipc_getsockopt,
3192 .sendmsg = tipc_send_packet,
3193 .recvmsg = tipc_recvmsg,
3194 .mmap = sock_no_mmap,
3195 .sendpage = sock_no_sendpage
3198 static const struct proto_ops stream_ops = {
3199 .owner = THIS_MODULE,
3200 .family = AF_TIPC,
3201 .release = tipc_release,
3202 .bind = tipc_bind,
3203 .connect = tipc_connect,
3204 .socketpair = tipc_socketpair,
3205 .accept = tipc_accept,
3206 .getname = tipc_getname,
3207 .poll = tipc_poll,
3208 .ioctl = tipc_ioctl,
3209 .listen = tipc_listen,
3210 .shutdown = tipc_shutdown,
3211 .setsockopt = tipc_setsockopt,
3212 .getsockopt = tipc_getsockopt,
3213 .sendmsg = tipc_sendstream,
3214 .recvmsg = tipc_recvstream,
3215 .mmap = sock_no_mmap,
3216 .sendpage = sock_no_sendpage
3219 static const struct net_proto_family tipc_family_ops = {
3220 .owner = THIS_MODULE,
3221 .family = AF_TIPC,
3222 .create = tipc_sk_create
3225 static struct proto tipc_proto = {
3226 .name = "TIPC",
3227 .owner = THIS_MODULE,
3228 .obj_size = sizeof(struct tipc_sock),
3229 .sysctl_rmem = sysctl_tipc_rmem
3233 * tipc_socket_init - initialize TIPC socket interface
3235 * Returns 0 on success, errno otherwise
3237 int tipc_socket_init(void)
3239 int res;
3241 res = proto_register(&tipc_proto, 1);
3242 if (res) {
3243 pr_err("Failed to register TIPC protocol type\n");
3244 goto out;
3247 res = sock_register(&tipc_family_ops);
3248 if (res) {
3249 pr_err("Failed to register TIPC socket type\n");
3250 proto_unregister(&tipc_proto);
3251 goto out;
3253 out:
3254 return res;
3258 * tipc_socket_stop - stop TIPC socket interface
3260 void tipc_socket_stop(void)
3262 sock_unregister(tipc_family_ops.family);
3263 proto_unregister(&tipc_proto);
3266 /* Caller should hold socket lock for the passed tipc socket. */
3267 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3269 u32 peer_node;
3270 u32 peer_port;
3271 struct nlattr *nest;
3273 peer_node = tsk_peer_node(tsk);
3274 peer_port = tsk_peer_port(tsk);
3276 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3277 if (!nest)
3278 return -EMSGSIZE;
3280 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3281 goto msg_full;
3282 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3283 goto msg_full;
3285 if (tsk->conn_type != 0) {
3286 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3287 goto msg_full;
3288 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3289 goto msg_full;
3290 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3291 goto msg_full;
3293 nla_nest_end(skb, nest);
3295 return 0;
3297 msg_full:
3298 nla_nest_cancel(skb, nest);
3300 return -EMSGSIZE;
3303 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3304 *tsk)
3306 struct net *net = sock_net(skb->sk);
3307 struct sock *sk = &tsk->sk;
3309 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3310 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3311 return -EMSGSIZE;
3313 if (tipc_sk_connected(sk)) {
3314 if (__tipc_nl_add_sk_con(skb, tsk))
3315 return -EMSGSIZE;
3316 } else if (!list_empty(&tsk->publications)) {
3317 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3318 return -EMSGSIZE;
3320 return 0;
3323 /* Caller should hold socket lock for the passed tipc socket. */
3324 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3325 struct tipc_sock *tsk)
3327 struct nlattr *attrs;
3328 void *hdr;
3330 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3331 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3332 if (!hdr)
3333 goto msg_cancel;
3335 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3336 if (!attrs)
3337 goto genlmsg_cancel;
3339 if (__tipc_nl_add_sk_info(skb, tsk))
3340 goto attr_msg_cancel;
3342 nla_nest_end(skb, attrs);
3343 genlmsg_end(skb, hdr);
3345 return 0;
3347 attr_msg_cancel:
3348 nla_nest_cancel(skb, attrs);
3349 genlmsg_cancel:
3350 genlmsg_cancel(skb, hdr);
3351 msg_cancel:
3352 return -EMSGSIZE;
3355 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3356 int (*skb_handler)(struct sk_buff *skb,
3357 struct netlink_callback *cb,
3358 struct tipc_sock *tsk))
3360 struct rhashtable_iter *iter = (void *)cb->args[4];
3361 struct tipc_sock *tsk;
3362 int err;
3364 rhashtable_walk_start(iter);
3365 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3366 if (IS_ERR(tsk)) {
3367 err = PTR_ERR(tsk);
3368 if (err == -EAGAIN) {
3369 err = 0;
3370 continue;
3372 break;
3375 sock_hold(&tsk->sk);
3376 rhashtable_walk_stop(iter);
3377 lock_sock(&tsk->sk);
3378 err = skb_handler(skb, cb, tsk);
3379 if (err) {
3380 release_sock(&tsk->sk);
3381 sock_put(&tsk->sk);
3382 goto out;
3384 release_sock(&tsk->sk);
3385 rhashtable_walk_start(iter);
3386 sock_put(&tsk->sk);
3388 rhashtable_walk_stop(iter);
3389 out:
3390 return skb->len;
3392 EXPORT_SYMBOL(tipc_nl_sk_walk);
3394 int tipc_dump_start(struct netlink_callback *cb)
3396 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3398 EXPORT_SYMBOL(tipc_dump_start);
3400 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3402 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3403 struct rhashtable_iter *iter = (void *)cb->args[4];
3404 struct tipc_net *tn = tipc_net(net);
3406 if (!iter) {
3407 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3408 if (!iter)
3409 return -ENOMEM;
3411 cb->args[4] = (long)iter;
3414 rhashtable_walk_enter(&tn->sk_rht, iter);
3415 return 0;
3418 int tipc_dump_done(struct netlink_callback *cb)
3420 struct rhashtable_iter *hti = (void *)cb->args[4];
3422 rhashtable_walk_exit(hti);
3423 kfree(hti);
3424 return 0;
3426 EXPORT_SYMBOL(tipc_dump_done);
3428 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3429 struct tipc_sock *tsk, u32 sk_filter_state,
3430 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3432 struct sock *sk = &tsk->sk;
3433 struct nlattr *attrs;
3434 struct nlattr *stat;
3436 /* Filter response w.r.t. sk_state */
3437 if (!(sk_filter_state & (1 << sk->sk_state)))
3438 return 0;
3440 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3441 if (!attrs)
3442 goto msg_cancel;
3444 if (__tipc_nl_add_sk_info(skb, tsk))
3445 goto attr_msg_cancel;
3447 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3448 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3449 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3450 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3451 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3452 sock_i_uid(sk))) ||
3453 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3454 tipc_diag_gen_cookie(sk),
3455 TIPC_NLA_SOCK_PAD))
3456 goto attr_msg_cancel;
3458 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3459 if (!stat)
3460 goto attr_msg_cancel;
3462 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3463 skb_queue_len(&sk->sk_receive_queue)) ||
3464 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3465 skb_queue_len(&sk->sk_write_queue)) ||
3466 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3467 atomic_read(&sk->sk_drops)))
3468 goto stat_msg_cancel;
3470 if (tsk->cong_link_cnt &&
3471 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3472 goto stat_msg_cancel;
3474 if (tsk_conn_cong(tsk) &&
3475 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3476 goto stat_msg_cancel;
3478 nla_nest_end(skb, stat);
3480 if (tsk->group)
3481 if (tipc_group_fill_sock_diag(tsk->group, skb))
3482 goto stat_msg_cancel;
3484 nla_nest_end(skb, attrs);
3486 return 0;
3488 stat_msg_cancel:
3489 nla_nest_cancel(skb, stat);
3490 attr_msg_cancel:
3491 nla_nest_cancel(skb, attrs);
3492 msg_cancel:
3493 return -EMSGSIZE;
3495 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3497 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3499 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3502 /* Caller should hold socket lock for the passed tipc socket. */
3503 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3504 struct netlink_callback *cb,
3505 struct publication *publ)
3507 void *hdr;
3508 struct nlattr *attrs;
3510 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3511 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3512 if (!hdr)
3513 goto msg_cancel;
3515 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3516 if (!attrs)
3517 goto genlmsg_cancel;
3519 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3520 goto attr_msg_cancel;
3521 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3522 goto attr_msg_cancel;
3523 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3524 goto attr_msg_cancel;
3525 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3526 goto attr_msg_cancel;
3528 nla_nest_end(skb, attrs);
3529 genlmsg_end(skb, hdr);
3531 return 0;
3533 attr_msg_cancel:
3534 nla_nest_cancel(skb, attrs);
3535 genlmsg_cancel:
3536 genlmsg_cancel(skb, hdr);
3537 msg_cancel:
3538 return -EMSGSIZE;
3541 /* Caller should hold socket lock for the passed tipc socket. */
3542 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3543 struct netlink_callback *cb,
3544 struct tipc_sock *tsk, u32 *last_publ)
3546 int err;
3547 struct publication *p;
3549 if (*last_publ) {
3550 list_for_each_entry(p, &tsk->publications, binding_sock) {
3551 if (p->key == *last_publ)
3552 break;
3554 if (p->key != *last_publ) {
3555 /* We never set seq or call nl_dump_check_consistent(),
3556 * so setting prev_seq here will cause the consistency
3557 * check to fail in the netlink callback handler,
3558 * resulting in the last NLMSG_DONE message having
3559 * the NLM_F_DUMP_INTR flag set.
3561 cb->prev_seq = 1;
3562 *last_publ = 0;
3563 return -EPIPE;
3565 } else {
3566 p = list_first_entry(&tsk->publications, struct publication,
3567 binding_sock);
3570 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3571 err = __tipc_nl_add_sk_publ(skb, cb, p);
3572 if (err) {
3573 *last_publ = p->key;
3574 return err;
3577 *last_publ = 0;
3579 return 0;
3582 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3584 int err;
3585 u32 tsk_portid = cb->args[0];
3586 u32 last_publ = cb->args[1];
3587 u32 done = cb->args[2];
3588 struct net *net = sock_net(skb->sk);
3589 struct tipc_sock *tsk;
3591 if (!tsk_portid) {
3592 struct nlattr **attrs;
3593 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3595 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3596 if (err)
3597 return err;
3599 if (!attrs[TIPC_NLA_SOCK])
3600 return -EINVAL;
3602 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3603 attrs[TIPC_NLA_SOCK],
3604 tipc_nl_sock_policy, NULL);
3605 if (err)
3606 return err;
3608 if (!sock[TIPC_NLA_SOCK_REF])
3609 return -EINVAL;
3611 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3614 if (done)
3615 return 0;
3617 tsk = tipc_sk_lookup(net, tsk_portid);
3618 if (!tsk)
3619 return -EINVAL;
3621 lock_sock(&tsk->sk);
3622 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3623 if (!err)
3624 done = 1;
3625 release_sock(&tsk->sk);
3626 sock_put(&tsk->sk);
3628 cb->args[0] = tsk_portid;
3629 cb->args[1] = last_publ;
3630 cb->args[2] = done;
3632 return skb->len;
3636 * tipc_sk_filtering - check if a socket should be traced
3637 * @sk: the socket to be examined
3638 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3639 * (portid, sock type, name type, name lower, name upper)
3641 * Returns true if the socket matches the filter tuple
3642 * (a value of 0 means 'any'), or when no tuple is set (all zeros);
3643 * otherwise false
3645 bool tipc_sk_filtering(struct sock *sk)
3647 struct tipc_sock *tsk;
3648 struct publication *p;
3649 u32 _port, _sktype, _type, _lower, _upper;
3650 u32 type = 0, lower = 0, upper = 0;
3652 if (!sk)
3653 return true;
3655 tsk = tipc_sk(sk);
3657 _port = sysctl_tipc_sk_filter[0];
3658 _sktype = sysctl_tipc_sk_filter[1];
3659 _type = sysctl_tipc_sk_filter[2];
3660 _lower = sysctl_tipc_sk_filter[3];
3661 _upper = sysctl_tipc_sk_filter[4];
3663 if (!_port && !_sktype && !_type && !_lower && !_upper)
3664 return true;
3666 if (_port)
3667 return (_port == tsk->portid);
3669 if (_sktype && _sktype != sk->sk_type)
3670 return false;
3672 if (tsk->published) {
3673 p = list_first_entry_or_null(&tsk->publications,
3674 struct publication, binding_sock);
3675 if (p) {
3676 type = p->type;
3677 lower = p->lower;
3678 upper = p->upper;
3682 if (!tipc_sk_type_connectionless(sk)) {
3683 type = tsk->conn_type;
3684 lower = tsk->conn_instance;
3685 upper = tsk->conn_instance;
3688 if ((_type && _type != type) || (_lower && _lower != lower) ||
3689 (_upper && _upper != upper))
3690 return false;
3692 return true;
3695 u32 tipc_sock_get_portid(struct sock *sk)
3697 return (sk) ? (tipc_sk(sk))->portid : 0;
3701 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3702 * both the rcv and backlog queues are considered
3703 * @sk: tipc sk to be checked
3704 * @skb: tipc msg to be checked
3706 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3709 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3711 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3712 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3713 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3715 return (qsize > lim * 90 / 100);
3719 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3720 * only the rcv queue is considered
3721 * @sk: tipc sk to be checked
3722 * @skb: tipc msg to be checked
3724 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3727 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3729 unsigned int lim = rcvbuf_limit(sk, skb);
3730 unsigned int qsize = sk_rmem_alloc_get(sk);
3732 return (qsize > lim * 90 / 100);
3736 * tipc_sk_dump - dump TIPC socket
3737 * @sk: tipc sk to be dumped
3738 * @dqueues: bitmask selecting which socket queues to dump:
3739 * - TIPC_DUMP_NONE: don't dump socket queues
3740 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3741 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3742 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3743 * - TIPC_DUMP_ALL: dump all the socket queues above
3744 * @buf: returned buffer holding the formatted dump data
3746 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3748 int i = 0;
3749 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3750 struct tipc_sock *tsk;
3751 struct publication *p;
3752 bool tsk_connected;
3754 if (!sk) {
3755 i += scnprintf(buf, sz, "sk data: (null)\n");
3756 return i;
3759 tsk = tipc_sk(sk);
3760 tsk_connected = !tipc_sk_type_connectionless(sk);
3762 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3763 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3764 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3765 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3766 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3767 if (tsk_connected) {
3768 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3769 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3770 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3771 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3773 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3774 if (tsk->published) {
3775 p = list_first_entry_or_null(&tsk->publications,
3776 struct publication, binding_sock);
3777 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3778 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3779 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3781 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3782 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3783 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3784 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3785 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3786 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3787 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3788 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3789 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3790 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3791 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3792 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3793 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3794 i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
3796 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3797 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3798 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3801 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3802 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3803 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3806 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3807 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3808 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3809 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3810 i += scnprintf(buf + i, sz - i, " tail ");
3811 i += tipc_skb_dump(sk->sk_backlog.tail, false,
3812 buf + i);
3816 return i;