/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>
#include <trace/events/sock.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define NAGLE_START_INIT	4
#define NAGLE_START_MAX		1024
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};
struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @maxnagle: maximum size of msg which can be subject to nagle
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @probe_unacked: probe has not received ack yet
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size
 * @peer_caps: peer capabilities mask
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: TIPC communications group
 * @oneway: message count in one direction (FIXME)
 * @nagle_start: current nagle value
 * @snd_backlog: send backlog count
 * @msg_acc: messages accepted; used in managing backlog and nagle
 * @pkt_cnt: TIPC socket packet count
 * @expect_ack: whether this TIPC socket is expecting an ack
 * @nodelay: setsockopt() TIPC_NODELAY setting
 * @group_is_open: TIPC socket group is fully open (FIXME)
 * @published: true if port has one or more associated names
 * @conn_addrtype: address type used when establishing connection
 */
struct tipc_sock {
	struct sock sk;
	u32 max_pkt;
	u32 maxnagle;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	u32 oneway;
	u32 nagle_start;
	u16 snd_backlog;
	u16 msg_acc;
	u16 pkt_cnt;
	bool expect_ack;
	bool nodelay;
	bool group_is_open;
	bool published;
	u8 conn_addrtype;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}
int tsk_set_importance(struct sock *sk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
	return 0;
}
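
/* Editorial example (illustrative user-space usage, not part of this file):
 * message importance is normally set from user space via setsockopt(); a
 * sketch, assuming the standard <linux/tipc.h> definitions:
 *
 *	int imp = TIPC_HIGH_IMPORTANCE;
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)) < 0)
 *		perror("setsockopt");
 *
 * Values above TIPC_CRITICAL_IMPORTANCE are rejected with EINVAL, as
 * enforced by tsk_set_importance() above.
 */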
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, incrementing the counter by one
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
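
/* Editorial note: worked example of the flow-control arithmetic above,
 * assuming FLOWCTL_BLK_SZ == 1024:
 *
 *	tsk_blocks(1500)      == (1500 / 1024) + 1 == 2   blocks charged
 *	tsk_adv_blocks(65536) == 65536 / 1024 / 4  == 16  blocks advertised
 *
 * tsk_adv_blocks() divides by 4 because truesize(len)/len is trusted to be
 * < 4 for len >= FLOWCTL_BLK_SZ, so a receive buffer of N bytes can safely
 * advertise only N/4 bytes' worth of blocks.
 */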
/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
 */
static void tsk_set_nagle(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;

	tsk->maxnagle = 0;
	if (sk->sk_type != SOCK_STREAM)
		return;
	if (tsk->nodelay)
		return;
	if (!(tsk->peer_caps & TIPC_NAGLE))
		return;
	/* Limit node local buffer size to avoid receive queue overflow */
	if (tsk->max_pkt == MAX_MSG_SIZE)
		tsk->maxnagle = 1500;
	else
		tsk->maxnagle = tsk->max_pkt;
}
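
/* Editorial note: with the logic above, a SOCK_STREAM socket talking to a
 * TIPC_NAGLE-capable peer bundles small writes up to maxnagle bytes. For a
 * node-local connection (max_pkt == MAX_MSG_SIZE) the bundle is capped at
 * 1500 bytes to keep the receive queue from overflowing; otherwise it
 * follows the link MTU held in max_pkt.
 */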
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 * @sk: network socket
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}
/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 * @sk: network socket
 * @error: response error code
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk, int error)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, error);
}
static bool tipc_sk_connected(const struct sock *sk)
{
	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
}
/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({                                                                             \
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	       \
		smp_rmb();                                                     \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		add_wait_queue(sk_sleep(sk_), &wait_);			       \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();				               \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
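
/* Editorial note: typical call pattern for the macro above (a sketch based
 * on the call sites later in this file, e.g. tipc_sendmcast()):
 *
 *	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 *	int rc;
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 *
 * The macro drops the socket lock while sleeping and re-takes it before
 * re-testing the condition, so the caller must hold the socket lock.
 */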
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	tsk->maxnagle = 0;
	tsk->nagle_start = NAGLE_START_INIT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		sk_free(sk);
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Push out delayed messages if in Nagle mode */
	tipc_sk_push_backlog(tsk, false);
	/* Remove pending SYN */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Remove partially received buffer if any */
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
		__skb_unlink(skb, &sk->sk_receive_queue);
		kfree_skb(skb);
	}

	/* Reject all unreceived messages if connectionless */
	if (tipc_sk_type_connectionless(sk)) {
		tsk_rej_rx_queue(sk, error);
		return;
	}

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
	case TIPC_ESTABLISHED:
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* Send a FIN+/- to its peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			__skb_queue_purge(&sk->sk_receive_queue);
			tipc_sk_respond(sk, skb, error);
			break;
		}
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		break;
	case TIPC_LISTEN:
		/* Reject all SYN messages */
		tsk_rej_rx_queue(sk, error);
		break;
	default:
		__skb_queue_purge(&sk->sk_receive_queue);
		break;
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, NULL);
	__skb_queue_purge(&tsk->mc_method.deferredq);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @skaddr: socket address describing name(s) and desired operation
 * @alen: size of socket address data structure
 *
 * Name and name sequence binding are indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);
	bool unbind = false;

	if (unlikely(!alen))
		return tipc_sk_withdraw(tsk, NULL);

	if (ua->addrtype == TIPC_SERVICE_ADDR) {
		ua->addrtype = TIPC_SERVICE_RANGE;
		ua->sr.upper = ua->sr.lower;
	}
	if (ua->scope < 0) {
		unbind = true;
		ua->scope = -ua->scope;
	}
	/* Users may still use deprecated TIPC_ZONE_SCOPE */
	if (ua->scope != TIPC_NODE_SCOPE)
		ua->scope = TIPC_CLUSTER_SCOPE;

	if (tsk->group)
		return -EACCES;

	if (unbind)
		return tipc_sk_withdraw(tsk, ua);
	return tipc_sk_publish(tsk, ua);
}
int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	int res;

	lock_sock(sock->sk);
	res = __tipc_bind(sock, skaddr, alen);
	release_sock(sock->sk);
	return res;
}
static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	u32 atype = ua->addrtype;

	if (alen) {
		if (!tipc_uaddr_valid(ua, alen))
			return -EINVAL;
		if (atype == TIPC_SOCKET_ADDR)
			return -EAFNOSUPPORT;
		if (ua->sr.type < TIPC_RESERVED_TYPES) {
			pr_warn_once("Can't bind to reserved service type %u\n",
				     ua->sr.type);
			return -EACCES;
		}
	}
	return tipc_sk_bind(sock, skaddr, alen);
}
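
/* Editorial example (illustrative user-space usage, not part of this file):
 * binding a service range from user space; a sketch assuming the standard
 * <linux/tipc.h> definitions. Service type 18888 is an arbitrary example;
 * types below TIPC_RESERVED_TYPES are refused with EACCES, and a negative
 * scope unbinds the name:
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 17, .upper = 42 },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */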
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Return: pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		fallthrough;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @ua: destination address struct
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, ua->sr.type);
	msg_set_namelower(hdr, ua->sr.lower);
	msg_set_nameupper(hdr, ua->sr.upper);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}
/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = ua->sk.node;
	port = ua->sk.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
						       exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (ua) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, ua->sa.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 dstcnt, exclude;
	struct list_head dsts;

	INIT_LIST_HEAD(&dsts);
	ua->sa.type = msg_nametype(hdr);
	ua->scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: the associated network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	struct tipc_uaddr ua;
	int user, mtyp, hlen;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);
	ua.addrtype = TIPC_SERVICE_RANGE;

	/* tipc_skb_peek() increments the head skb's reference counter */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		ua.sr.type = msg_nametype(hdr);
		ua.sr.lower = msg_namelower(hdr);
		ua.sr.upper = msg_nameupper(hdr);
		if (onode == self)
			ua.scope = TIPC_ANY_SCOPE;
		else
			ua.scope = TIPC_CLUSTER_SCOPE;

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			ua.sr.lower = 0;
			ua.sr.upper = ~0;
			ua.scope = msg_lookup_scope(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append clones to inputq only if skb is still head of arrvq */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			/* Decrement the skb's refcnt */
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
 *                         when socket is in Nagle mode
 */
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
{
	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
	struct sk_buff *skb = skb_peek_tail(txq);
	struct net *net = sock_net(&tsk->sk);
	u32 dnode = tsk_peer_node(tsk);
	int rc;

	if (nagle_ack) {
		tsk->pkt_cnt += skb_queue_len(txq);
		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
			tsk->oneway = 0;
			if (tsk->nagle_start < NAGLE_START_MAX)
				tsk->nagle_start *= 2;
			tsk->expect_ack = false;
			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
				 tsk->nagle_start);
		} else {
			tsk->nagle_start = NAGLE_START_INIT;
			if (skb) {
				msg_set_ack_required(buf_msg(skb));
				tsk->expect_ack = true;
			} else {
				tsk->expect_ack = false;
			}
		}
		tsk->msg_acc = 0;
		tsk->pkt_cnt = 0;
	}

	if (!skb || tsk->cong_link_cnt)
		return;

	/* Do not send SYN again after congestion */
	if (msg_is_syn(buf_msg(skb)))
		return;

	if (tsk->msg_acc)
		tsk->pkt_cnt += skb_queue_len(txq);
	tsk->snt_unacked += tsk->snd_backlog;
	tsk->snd_backlog = 0;
	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
	if (rc == -ELINKCONG)
		tsk->cong_link_cnt = 1;
}
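
/* Editorial note: worked example of the adaptive Nagle logic above. With
 * NAGLE_START_INIT == 4 and NAGLE_START_MAX == 1024, a sender must first
 * complete nagle_start consecutive one-way messages before bundling kicks
 * in. If an ACK then shows fewer than two messages bundled per packet
 * (msg_acc / pkt_cnt < 2), bundling is judged unprofitable and nagle_start
 * doubles (4 -> 8 -> ... -> 1024), postponing the next attempt; a
 * profitable ACK resets it to NAGLE_START_INIT.
 */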
/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @inputq: buffer list containing the buffers
 * @xmitq: output message area
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool was_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr)) {
		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
		goto exit;
	}

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		was_cong = tsk_conn_cong(tsk);
		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (was_cong && !tsk_conn_cong(tsk))
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_socket_addr skaddr;
	struct sk_buff_head pkts;
	int atype, mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (ua) {
		if (!tipc_uaddr_valid(ua, m->msg_namelen))
			return -EINVAL;
		atype = ua->addrtype;
	}

	/* If socket belongs to a communication group follow other paths */
	if (grp) {
		if (!ua)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_ADDR)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (atype == TIPC_SOCKET_ADDR)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_RANGE)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (!ua) {
		ua = (struct tipc_uaddr *)&tsk->peer;
		if (!syn && ua->family != AF_TIPC)
			return -EDESTADDRREQ;
		atype = ua->addrtype;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (atype == TIPC_SERVICE_ADDR)
			tsk->conn_addrtype = atype;
		msg_set_syn(hdr, 1);
	}

	memset(&skaddr, 0, sizeof(skaddr));

	/* Determine destination */
	if (atype == TIPC_SERVICE_RANGE) {
		return tipc_sendmcast(sock, ua, m, dlen, timeout);
	} else if (atype == TIPC_SERVICE_ADDR) {
		skaddr.node = ua->lookup_node;
		ua->scope = tipc_node2scope(skaddr.node);
		if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
			return -EHOSTUNREACH;
	} else if (atype == TIPC_SOCKET_ADDR) {
		skaddr = ua->sk;
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, skaddr.node, 0));
	if (unlikely(rc))
		return rc;

	/* Finally build message header */
	msg_set_destnode(hdr, skaddr.node);
	msg_set_destport(hdr, skaddr.ref);
	if (atype == TIPC_SERVICE_ADDR) {
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, ua->sa.type);
		msg_set_nameinst(hdr, ua->sa.instance);
		msg_set_lookup_scope(hdr, ua->scope);
	} else { /* TIPC_SOCKET_ADDR */
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Add message body */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
		__skb_queue_purge(&pkts);
		return -ENOMEM;
	}

	/* Send message */
	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, skaddr.node, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc)) {
		tipc_set_sk_state(sk, TIPC_CONNECTING);
		if (dlen && timeout) {
			timeout = msecs_to_jiffies(timeout);
			tipc_wait_for_connect(sock, &timeout);
		}
	}

	return rc ? rc : dlen;
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Return: the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct sk_buff_head *txq = &sk->sk_write_queue;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff *skb;
	u32 dnode = tsk_peer_node(tsk);
	int maxnagle = tsk->maxnagle;
	int maxpkt = tsk->max_pkt;
	int send, sent = 0;
	int blocks, rc = 0;

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;
		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		blocks = tsk->snd_backlog;
		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
		    send <= maxnagle) {
			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
			if (unlikely(rc < 0))
				break;
			blocks += rc;
			tsk->msg_acc++;
			if (blocks <= 64 && tsk->expect_ack) {
				tsk->snd_backlog = blocks;
				sent += send;
				break;
			} else if (blocks > 64) {
				tsk->pkt_cnt += skb_queue_len(txq);
			} else {
				skb = skb_peek_tail(txq);
				if (skb) {
					msg_set_ack_required(buf_msg(skb));
					tsk->expect_ack = true;
				} else {
					tsk->expect_ack = false;
				}
				tsk->msg_acc = 0;
				tsk->pkt_cnt = 0;
			}
		} else {
			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
			if (unlikely(rc != send))
				break;
			blocks += tsk_inc(tsk, send + MIN_H_SIZE);
		}
		trace_tipc_sk_sendstream(sk, skb_peek(txq),
					 TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += blocks;
			tsk->snd_backlog = 0;
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
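}

/* Editorial example (illustrative user-space usage, not part of this file):
 * a SOCK_STREAM sender ends up in __tipc_sendstream() after a normal
 * connect()/send() sequence; connection setup may also happen implicitly
 * via sendto() on an unconnected stream socket (the TIPC_OPEN branch
 * above). A sketch:
 *
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *	};
 *
 *	connect(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	send(sd, buf, len, 0);
 */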
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Return: the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_syn(msg, 0);
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	tsk_set_nagle(tsk);
	__skb_queue_purge(&sk->sk_write_queue);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Return: 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *hdr;
	u32 data[3] = {0,};
	bool has_addr;
	int dlen, rc;

	if (likely(m->msg_controllen == 0))
		return 0;

	hdr = buf_msg(skb);
	dlen = msg_data_sz(hdr);

	/* Capture errored message object, if any */
	if (msg_errcode(hdr)) {
		if (skb_linearize(skb))
			return -ENOMEM;
		hdr = buf_msg(skb);
		data[0] = msg_errcode(hdr);
		data[1] = dlen;
		rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
		if (rc || !dlen)
			return rc;
		rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
		if (rc)
			return rc;
	}

	/* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
	switch (msg_type(hdr)) {
	case TIPC_NAMED_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = data[1];
		break;
	case TIPC_MCAST_MSG:
		has_addr = true;
		data[0] = msg_nametype(hdr);
		data[1] = msg_namelower(hdr);
		data[2] = msg_nameupper(hdr);
		break;
	case TIPC_CONN_MSG:
		has_addr = !!tsk->conn_addrtype;
		data[0] = msg_nametype(&tsk->phdr);
		data[1] = msg_nameinst(&tsk->phdr);
		data[2] = data[1];
		break;
	default:
		has_addr = false;
	}
	if (!has_addr)
		return 0;
	return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return NULL;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return NULL;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	return skb;
}
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sk_buff *skb;

	skb = tipc_sk_build_ack(tsk);
	if (!skb)
		return;

	tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
			   msg_link_selector(buf_msg(skb)));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			add_wait_queue(sk_sleep(sk), &wait);
			release_sock(sk);
			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
			sched_annotate_sleep();
			lock_sock(sk);
			remove_wait_queue(sk_sleep(sk), &wait);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	*timeop = timeo;
	return err;
}
/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Return: size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct tipc_skb_cb *skb_cb;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		int offset = skb_cb->bytes_read;

		copy = min_t(int, dlen - offset, buflen);
		rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
		if (unlikely(rc))
			goto exit;
		if (unlikely(offset + copy < dlen)) {
			if (flags & MSG_EOR) {
				if (!(flags & MSG_PEEK))
					skb_cb->bytes_read = offset + copy;
			} else {
				m->msg_flags |= MSG_TRUNC;
				skb_cb->bytes_read = 0;
			}
		} else {
			if (flags & MSG_EOR)
				m->msg_flags |= MSG_EOR;
			skb_cb->bytes_read = 0;
		}
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
			rc = -ECONNRESET;
			goto exit;
		}
	}

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		__skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	if (skb_cb->bytes_read)
		goto exit;

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
/**
 * tipc_recvstream - receive stream-oriented data
 * @sock: network socket
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Return: size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, skb, tsk);
			if (rc)
				break;
			hdr = buf_msg(skb);
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}
/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		/* coupled with smp_rmb() in tipc_wait_for_cond() */
		smp_wmb();
		tsk->cong_link_cnt--;
		wakeup = true;
		tipc_sk_push_backlog(tsk, false);
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
/**
 * tipc_sk_filter_connect - check incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer.
 * @xmitq: for Nagle ACK if any
 * Return: true if message should be added to receive queue, false otherwise
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	bool con_msg = msg_connected(hdr);
	u32 pport = tsk_peer_port(tsk);
	u32 pnode = tsk_peer_node(tsk);
	u32 oport = msg_origport(hdr);
	u32 onode = msg_orignode(hdr);
	int err = msg_errcode(hdr);
	unsigned long delay;

	if (unlikely(msg_mcast(hdr)))
		return false;
	tsk->oneway = 0;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Setup ACK */
		if (likely(con_msg)) {
			if (err)
				break;
			tipc_sk_finish_conn(tsk, oport, onode);
			msg_set_importance(&tsk->phdr, msg_importance(hdr));
			/* ACK+ message with data is added to receive queue */
			if (msg_data_sz(hdr))
				return true;
			/* Empty ACK-, - wake up sleeping connect() and drop */
			sk->sk_state_change(sk);
			msg_set_dest_droppable(hdr, 1);
			return false;
		}
		/* Ignore connectionless message if not from listening socket */
		if (oport != pport || onode != pnode)
			return false;

		/* Rejected SYN */
		if (err != TIPC_ERR_OVERLOAD)
			break;

		/* Prepare for new setup attempt if we have a SYN clone */
		if (skb_queue_empty(&sk->sk_write_queue))
			break;
		get_random_bytes(&delay, 2);
		delay %= (tsk->conn_timeout / 4);
		delay = msecs_to_jiffies(delay + 100);
		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
		return false;
	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		return false;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_is_syn(hdr) &&
		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
			return false;
		if (!con_msg && !err)
			return true;
		return false;
	case TIPC_ESTABLISHED:
		if (!skb_queue_empty(&sk->sk_write_queue))
			tipc_sk_push_backlog(tsk, false);
		/* Accept only connection-based messages sent by peer */
		if (likely(con_msg && !err && pport == oport &&
			   pnode == onode)) {
			if (msg_ack_required(hdr)) {
				struct sk_buff *skb;

				skb = tipc_sk_build_ack(tsk);
				if (skb) {
					msg_set_nagle_ack(buf_msg(skb));
					__skb_queue_tail(xmitq, skb);
				}
			}
			return true;
		}
		if (!tsk_peer_msg(tsk, hdr))
			return false;
		if (!err)
			return true;
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, pnode, tsk->portid);
		sk->sk_state_change(sk);
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}
	/* Abort connection setup attempt */
	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	sk->sk_err = ECONNREFUSED;
	sk->sk_state_change(sk);
	return true;
}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @skb: message
 *
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Return: overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return READ_ONCE(sk->sk_rcvbuf);

	if (unlikely(!msg_connected(hdr)))
		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return READ_ONCE(sk->sk_rcvbuf);

	return FLOWCTL_MSG_LIM;
}
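/* Worked example (illustrative): for a connectionless message and a default
 * sk_rcvbuf of 2 MB, the shift by msg_importance() yields exactly the
 * limits listed in the comment above:
 *
 *	TIPC_LOW_IMPORTANCE      (0): 2 MB << 0 =  2 MB
 *	TIPC_MEDIUM_IMPORTANCE   (1): 2 MB << 1 =  4 MB
 *	TIPC_HIGH_IMPORTANCE     (2): 2 MB << 2 =  8 MB
 *	TIPC_CRITICAL_IMPORTANCE (3): 2 MB << 3 = 16 MB
 */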
/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 * @xmitq: output message area (FIXME)
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int mtyp = msg_type(hdr);
	int limit, err = TIPC_OK;

	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
					   "err_overload2!");
			atomic_inc(&sk->sk_drops);
			err = TIPC_ERR_OVERLOAD;
		}

		if (unlikely(err)) {
			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
						      "@filter_rcv!");
				__skb_queue_tail(xmitq, skb);
			}
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
					 "rcvq >90% allocated!");
		sk->sk_data_ready(sk);
	}
}
/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Return: 0
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);
	return 0;
}
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: output queue
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim))) {
			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
						 "bklg & rcvq >90% allocated!");
			continue;
		}

		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		atomic_inc(&sk->sk_drops);
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
					      "@sk_enqueue!");
			__skb_queue_tail(xmitq, skb);
		}
		break;
	}
}
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the associated network namespace
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}
		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;

		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);

		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);
		if (sk->sk_state == TIPC_DISCONNECTING)
			break;

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
				     &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
{
	if (addr->family != AF_TIPC)
		return false;
	if (addr->addrtype == TIPC_SERVICE_RANGE)
		return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
	return (addr->addrtype == TIPC_SERVICE_ADDR ||
		addr->addrtype == TIPC_SOCKET_ADDR);
}
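/* Illustrative user-space sketch (not part of this file; the service type
 * and instance values are hypothetical): an address that passes
 * tipc_sockaddr_is_sane():
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name.type = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *
 * A TIPC_SERVICE_RANGE address must additionally satisfy
 * addr.nameseq.lower <= addr.nameseq.upper.
 */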
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (tsk->group) {
		res = -EINVAL;
		goto exit;
	}

	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	}
	if (!tipc_sockaddr_is_sane(dst)) {
		res = -EINVAL;
		goto exit;
	}
	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
		res = -EINVAL;
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;
		iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		fallthrough;
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}
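/* Illustrative user-space sketch (assumption, mirroring the comment in the
 * TIPC_OPEN case above): a non-blocking connect() fails with EINPROGRESS
 * once the SYN- has been sent, and completion can be awaited with poll():
 *
 *	fcntl(sd, F_SETFL, O_NONBLOCK);
 *	if (connect(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
 *	    errno != EINPROGRESS)
 *		perror("connect");
 */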
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			add_wait_queue(sk_sleep(sk), &wait);
			release_sock(sk);
			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
			lock_sock(sk);
			remove_wait_queue(sk_sleep(sk), &wait);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	return err;
}
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @arg: arguments for accept
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock,
		       struct proto_accept_arg *arg)
{
	struct sock *new_sk, *sk = sock->sk;
	struct tipc_sock *new_tsock;
	struct msghdr m = {NULL,};
	struct tipc_msg *msg;
	struct sk_buff *buf;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, arg->kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_sk, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
		msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
		msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'.
	 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
	 */
	if (!msg_data_sz(msg)) {
		tsk_advance_rx_queue(sk);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
	__tipc_sendstream(new_sock, &m, 0);
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		res = 0;
	} else {
		res = -ENOTCONN;
	}
	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);

	release_sock(sk);
	return res;
}
static void tipc_sk_check_probing_state(struct sock *sk,
					struct sk_buff_head *list)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 pnode = tsk_peer_node(tsk);
	u32 pport = tsk_peer_port(tsk);
	u32 self = tsk_own_node(tsk);
	u32 oport = tsk->portid;
	struct sk_buff *skb;

	if (tsk->probe_unacked) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		sk->sk_err = ECONNABORTED;
		tipc_node_remove_conn(sock_net(sk), pnode, pport);
		sk->sk_state_change(sk);
		return;
	}
	/* Prepare new probe */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      pnode, self, pport, oport, TIPC_OK);
	if (skb)
		__skb_queue_tail(list, skb);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
}
static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
{
	struct tipc_sock *tsk = tipc_sk(sk);

	/* Try again later if dest link is congested */
	if (tsk->cong_link_cnt) {
		sk_reset_timer(sk, &sk->sk_timer,
			       jiffies + msecs_to_jiffies(100));
		return;
	}
	/* Prepare SYN for retransmit */
	tipc_msg_skb_clone(&sk->sk_write_queue, list);
}
static void tipc_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 pnode = tsk_peer_node(tsk);
	struct sk_buff_head list;
	int rc = 0;

	__skb_queue_head_init(&list);
	bh_lock_sock(sk);

	/* Try again later if socket is busy */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	if (sk->sk_state == TIPC_ESTABLISHED)
		tipc_sk_check_probing_state(sk, &list);
	else if (sk->sk_state == TIPC_CONNECTING)
		tipc_sk_retry_connect(sk, &list);

	bh_unlock_sock(sk);

	if (!skb_queue_empty(&list))
		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);

	/* SYN messages may cause link congestion */
	if (rc == -ELINKCONG) {
		tipc_dest_push(&tsk->cong_links, pnode, 0);
		tsk->cong_link_cnt = 1;
	}
	sock_put(sk);
}
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_socket_addr skaddr;
	struct publication *p;
	u32 key;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;
	skaddr.ref = tsk->portid;
	skaddr.node = tipc_own_addr(net);
	p = tipc_nametbl_publish(net, ua, &skaddr, key);
	if (unlikely(!p))
		return -EINVAL;

	list_add(&p->binding_sock, &tsk->publications);
	tsk->pub_count++;
	tsk->published = true;
	return 0;
}
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *safe, *p;
	struct tipc_uaddr _ua;
	int rc = -EINVAL;

	list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
		if (!ua) {
			tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
				   p->sr.type, p->sr.lower, p->sr.upper);
			tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
			continue;
		}
		/* Unbind specific publication */
		if (p->scope != ua->scope)
			continue;
		if (p->sr.type != ua->sr.type)
			continue;
		if (p->sr.lower != ua->sr.lower)
			continue;
		if (p->sr.upper != ua->sr.upper)
			continue;
		tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
		rc = 0;
		break;
	}
	if (list_empty(&tsk->publications)) {
		tsk->published = 0;
		rc = 0;
	}
	return rc;
}
/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			sock_hold(&tsk->sk);
			rhashtable_walk_stop(&iter);
			lock_sock(&tsk->sk);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tipc_own_addr(net));
			msg_set_orignode(msg, tipc_own_addr(net));
			release_sock(&tsk->sk);
			rhashtable_walk_start(&iter);
			sock_put(&tsk->sk);
		}

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}
static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}
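/* Worked example (illustrative): the wrap-around above keeps port ids
 * inside [TIPC_MIN_PORT, TIPC_MAX_PORT]. If the random starting point is
 * 0xffffffff (TIPC_MAX_PORT), the next increment wraps the u32 to 0, which
 * the range check resets to TIPC_MIN_PORT (1) before the insert is retried.
 */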
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}
static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};
int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}
void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}
static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_uaddr ua;
	int rc;

	if (mreq->type < TIPC_RESERVED_TYPES)
		return -EACCES;
	if (mreq->scope > TIPC_NODE_SCOPE)
		return -EINVAL;
	if (mreq->scope != TIPC_NODE_SCOPE)
		mreq->scope = TIPC_CLUSTER_SCOPE;
	if (grp)
		return -EACCES;
	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
	if (!grp)
		return -ENOMEM;
	tsk->group = grp;
	msg_set_lookup_scope(hdr, mreq->scope);
	msg_set_nametype(hdr, mreq->type);
	msg_set_dest_droppable(hdr, true);
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
		   mreq->type, mreq->instance, mreq->instance);
	tipc_nametbl_build_group(net, grp, &ua);
	rc = tipc_sk_publish(tsk, &ua);
	if (rc) {
		tipc_group_delete(net, grp);
		tsk->group = NULL;
		return rc;
	}
	/* Eliminate any risk that a broadcast overtakes sent JOINs */
	tsk->mc_method.rcast = true;
	tsk->mc_method.mandatory = true;
	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
	return rc;
}
static int tipc_sk_leave(struct tipc_sock *tsk)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_uaddr ua;
	int scope;

	if (!grp)
		return -EINVAL;
	ua.addrtype = TIPC_SERVICE_RANGE;
	tipc_group_self(grp, &ua.sr, &scope);
	ua.scope = scope;
	tipc_group_delete(net, grp);
	tsk->group = NULL;
	tipc_sk_withdraw(tsk, &ua);
	return 0;
}
/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   sockptr_t ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group_req mreq;
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
	case TIPC_NODELAY:
		if (ol < sizeof(value))
			return -EINVAL;
		if (copy_from_sockptr(&value, ov, sizeof(u32)))
			return -EFAULT;
		break;
	case TIPC_GROUP_JOIN:
		if (ol < sizeof(mreq))
			return -EINVAL;
		if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
			return -EFAULT;
		break;
	default:
		if (!sockptr_is_null(ov) || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(sk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_GROUP_JOIN:
		res = tipc_sk_join(tsk, &mreq);
		break;
	case TIPC_GROUP_LEAVE:
		res = tipc_sk_leave(tsk);
		break;
	case TIPC_NODELAY:
		tsk->nodelay = !!value;
		tsk_set_nagle(tsk);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}
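/* Illustrative user-space sketch (not part of this file): setting two of
 * the SOL_TIPC options handled above:
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 10000;	// connect timeout in milliseconds
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */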
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP
 * options (to ease compatibility).
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_service_range seq;
	int len, scope;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	case TIPC_SOCK_RECVQ_USED:
		value = sk_rmem_alloc_get(sk);
		break;
	case TIPC_GROUP_JOIN:
		seq.type = 0;
		if (tsk->group)
			tipc_group_self(tsk->group, &seq, &scope);
		value = seq.type;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct net *net = sock_net(sock->sk);
	struct tipc_sioc_nodeid_req nr = {0};
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(net,
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	case SIOCGETNODEID:
		if (copy_from_user(&nr, argp, sizeof(nr)))
			return -EFAULT;
		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
			return -EADDRNOTAVAIL;
		if (copy_to_user(argp, &nr, sizeof(nr)))
			return -EFAULT;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}
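/* Illustrative user-space sketch (assumption): tipc_socketpair() backs the
 * regular socketpair() system call, producing two sockets on the own node
 * that are connected to each other:
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) < 0)
 *		perror("socketpair");
 */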
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};
/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Return: 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}
/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node, peer_port;
	u32 conn_type, conn_instance;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);
	conn_type = msg_nametype(&tsk->phdr);
	conn_instance = msg_nameinst(&tsk->phdr);
	nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_addrtype != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
*skb
, struct tipc_sock
3502 struct net
*net
= sock_net(skb
->sk
);
3503 struct sock
*sk
= &tsk
->sk
;
3505 if (nla_put_u32(skb
, TIPC_NLA_SOCK_REF
, tsk
->portid
) ||
3506 nla_put_u32(skb
, TIPC_NLA_SOCK_ADDR
, tipc_own_addr(net
)))
3509 if (tipc_sk_connected(sk
)) {
3510 if (__tipc_nl_add_sk_con(skb
, tsk
))
3512 } else if (!list_empty(&tsk
->publications
)) {
3513 if (nla_put_flag(skb
, TIPC_NLA_SOCK_HAS_PUBL
))
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
		    int (*skb_handler)(struct sk_buff *skb,
				       struct netlink_callback *cb,
				       struct tipc_sock *tsk))
{
	struct rhashtable_iter *iter = (void *)cb->args[4];
	struct tipc_sock *tsk;
	int err;

	rhashtable_walk_start(iter);
	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(tsk)) {
			if (PTR_ERR(tsk) == -EAGAIN)
				continue;
			break;
		}

		sock_hold(&tsk->sk);
		rhashtable_walk_stop(iter);
		lock_sock(&tsk->sk);
		err = skb_handler(skb, cb, tsk);
		if (err) {
			release_sock(&tsk->sk);
			sock_put(&tsk->sk);
			goto out;
		}
		release_sock(&tsk->sk);
		rhashtable_walk_start(iter);
		sock_put(&tsk->sk);
	}
out:
	rhashtable_walk_stop(iter);

	return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
int tipc_dump_start(struct netlink_callback *cb)
{
	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
}
EXPORT_SYMBOL(tipc_dump_start);
int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
{
	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
	struct rhashtable_iter *iter = (void *)cb->args[4];
	struct tipc_net *tn = tipc_net(net);

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[4] = (long)iter;
	}

	rhashtable_walk_enter(&tn->sk_rht, iter);

	return 0;
}
int tipc_dump_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *hti = (void *)cb->args[4];

	rhashtable_walk_exit(hti);
	kfree(hti);
	return 0;
}
EXPORT_SYMBOL(tipc_dump_done);
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tipc_sock *tsk, u32 sk_filter_state,
			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
	struct sock *sk = &tsk->sk;
	struct nlattr *attrs;
	struct nlattr *stat;

	/* filter response w.r.t sk_state */
	if (!(sk_filter_state & (1 << sk->sk_state)))
		return 0;

	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto msg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
					 sock_i_uid(sk))) ||
	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
			      tipc_diag_gen_cookie(sk),
			      TIPC_NLA_SOCK_PAD))
		goto attr_msg_cancel;

	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
	if (!stat)
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
			skb_queue_len(&sk->sk_receive_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
			skb_queue_len(&sk->sk_write_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
			atomic_read(&sk->sk_drops)))
		goto stat_msg_cancel;

	if (tsk->cong_link_cnt &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
		goto stat_msg_cancel;

	if (tsk_conn_cong(tsk) &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
		goto stat_msg_cancel;

	nla_nest_end(skb, stat);

	if (tsk->group)
		if (tipc_group_fill_sock_diag(tsk->group, skb))
			goto stat_msg_cancel;

	nla_nest_end(skb, attrs);

	return 0;

stat_msg_cancel:
	nla_nest_cancel(skb, stat);
attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
msg_cancel:
	return -EMSGSIZE;
}
EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, binding_sock) {
			if (p->key == *last_publ)
				break;
		}
		if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     binding_sock);
	}

	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
						  attrs[TIPC_NLA_SOCK],
						  tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}
/**
 * tipc_sk_filtering - check if a socket should be traced
 * @sk: the socket to be examined
 *
 * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
 * (portid, sock type, name type, name lower, name upper)
 *
 * Return: true if the socket meets the socket tuple data
 * (value 0 = 'any') or when there is no tuple set (all = 0),
 * otherwise false
 */
bool tipc_sk_filtering(struct sock *sk)
{
	struct tipc_sock *tsk;
	struct publication *p;
	u32 _port, _sktype, _type, _lower, _upper;
	u32 type = 0, lower = 0, upper = 0;

	if (!sk)
		return true;

	tsk = tipc_sk(sk);

	_port = sysctl_tipc_sk_filter[0];
	_sktype = sysctl_tipc_sk_filter[1];
	_type = sysctl_tipc_sk_filter[2];
	_lower = sysctl_tipc_sk_filter[3];
	_upper = sysctl_tipc_sk_filter[4];

	if (!_port && !_sktype && !_type && !_lower && !_upper)
		return true;

	if (_port)
		return (_port == tsk->portid);

	if (_sktype && _sktype != sk->sk_type)
		return false;

	if (tsk->published) {
		p = list_first_entry_or_null(&tsk->publications,
					     struct publication, binding_sock);
		if (p) {
			type = p->sr.type;
			lower = p->sr.lower;
			upper = p->sr.upper;
		}
	}

	if (!tipc_sk_type_connectionless(sk)) {
		type = msg_nametype(&tsk->phdr);
		lower = msg_nameinst(&tsk->phdr);
		upper = lower;
	}

	if ((_type && _type != type) || (_lower && _lower != lower) ||
	    (_upper && _upper != upper))
		return false;

	return true;
}
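/* Illustrative sketch (the sysctl spelling is an assumption): the filter
 * tuple tested above is set via the net.tipc.sk_filter sysctl; e.g. to
 * trace only the socket with port id 3000131:
 *
 *	sysctl -w net.tipc.sk_filter="3000131 0 0 0 0"
 */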
u32 tipc_sock_get_portid(struct sock *sk)
{
	return (sk) ? (tipc_sk(sk))->portid : 0;
}
/**
 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
 *			both the rcv and backlog queues are considered
 * @sk: tipc sk to be checked
 * @skb: tipc msg to be checked
 *
 * Return: true if the socket rx queue allocation is > 90%, otherwise false
 */
bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
{
	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);

	return (qsize > lim * 90 / 100);
}
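/* Worked example (illustrative): with a 2 MB limit from rcvbuf_limit(),
 * tipc_sk_overlimit1() returns true once the combined rcv-queue and
 * backlog allocation exceeds 2097152 * 90 / 100 = 1887436 bytes, i.e.
 * just over 90% occupancy.
 */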
/**
 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
 *			only the rcv queue is considered
 * @sk: tipc sk to be checked
 * @skb: tipc msg to be checked
 *
 * Return: true if the socket rx queue allocation is > 90%, otherwise false
 */
bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
{
	unsigned int lim = rcvbuf_limit(sk, skb);
	unsigned int qsize = sk_rmem_alloc_get(sk);

	return (qsize > lim * 90 / 100);
}
/**
 * tipc_sk_dump - dump TIPC socket
 * @sk: tipc sk to be dumped
 * @dqueues: bitmask to decide if any socket queue to be dumped?
 *           - TIPC_DUMP_NONE: don't dump socket queues
 *           - TIPC_DUMP_SK_SNDQ: dump socket send queue
 *           - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
 *           - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
 *           - TIPC_DUMP_ALL: dump all the socket queues above
 * @buf: returned buffer of dump data in format
 */
int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
	u32 conn_type, conn_instance;
	struct tipc_sock *tsk;
	struct publication *p;
	bool tsk_connected;

	if (!sk) {
		i += scnprintf(buf, sz, "sk data: (null)\n");
		return i;
	}

	tsk = tipc_sk(sk);
	tsk_connected = !tipc_sk_type_connectionless(sk);

	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
	i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
	if (tsk_connected) {
		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
		conn_type = msg_nametype(&tsk->phdr);
		conn_instance = msg_nameinst(&tsk->phdr);
		i += scnprintf(buf + i, sz - i, " %u", conn_type);
		i += scnprintf(buf + i, sz - i, " %u", conn_instance);
	}
	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
	if (tsk->published) {
		p = list_first_entry_or_null(&tsk->publications,
					     struct publication, binding_sock);
		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
	}
	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
	i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));

	if (dqueues & TIPC_DUMP_SK_SNDQ) {
		i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
	}

	if (dqueues & TIPC_DUMP_SK_RCVQ) {
		i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
	}

	if (dqueues & TIPC_DUMP_SK_BKLGQ) {
		i += scnprintf(buf + i, sz - i, "sk_backlog:\n  head ");
		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
			i += scnprintf(buf + i, sz - i, "  tail ");
			i += tipc_skb_dump(sk->sk_backlog.tail, false,