/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
struct rxrpc_send_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (call->tx_top - call->tx_hard_ack <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			break;
		if (call->state >= RXRPC_CALL_COMPLETE) {
			ret = call->error;
			break;
		}
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(*timeo);
			break;
		}
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}
/*
 * Notify the owner of the call that the transmit phase is ended and the last
 * packet has been queued.
 */
static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
				rxrpc_notify_end_tx_t notify_end_tx)
{
	if (notify_end_tx)
		notify_end_tx(&rx->sk, call, call->user_call_ID);
}
/*
 * Queue a DATA packet for transmission, set the resend timeout and send the
 * packet immediately.
 */
static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
			       struct sk_buff *skb, bool last,
			       rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last) {
		annotation |= RXRPC_TX_ANNO_LAST;
		set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
	}

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_tx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			call->ack_at = call->expire_at;
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
					  ktime_get_real());
			if (!last)
				break;
			/* Fall through */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			rxrpc_notify_end_tx(rx, call, notify_end_tx);
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		ktime_t now = ktime_get_real(), resend_at;

		resend_at = ktime_add_ms(now, rxrpc_resend_timeout);

		if (ktime_before(resend_at, call->resend_at)) {
			call->resend_at = resend_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
		}
	}

	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave("");
}
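
/*
 * For illustration (an aside, not original to this file): rxtx_buffer[] is
 * a ring indexed by the low bits of the packet sequence number,
 * ix = seq & RXRPC_RXTX_BUFF_MASK.  Assuming a 64-slot ring, seq 1 maps to
 * slot 1, seq 64 to slot 0 and seq 65 back to slot 1; the Tx window checks
 * in rxrpc_send_data() keep tx_top - tx_hard_ack within the window, so a
 * slot is not reused before the packet previously held there has been
 * hard-ACK'd and released.
 */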
/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	if (call->tx_total_len != -1) {
		if (len > call->tx_total_len)
			return -EMSGSIZE;
		if (!more && len != call->tx_total_len)
			return -EMSGSIZE;
	}

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false);

		if (!skb) {
			size_t size, chunk, max, space;

			if (call->tx_top - call->tx_hard_ack >=
			    min_t(unsigned int, call->tx_winsize,
				  call->cong_cwnd + call->cong_extra)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb, rxrpc_skb_tx_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			ret = skb_add_data(skb, &msg->msg_iter, copy);
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
			if (call->tx_total_len != -1)
				call->tx_total_len -= copy;
		}

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			u32 seq;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					skb_put_zero(skb, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = conn->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			rxrpc_queue_packet(rx, call, skb,
					   !msg_data_left(msg) && !more,
					   notify_end_tx);
			skb = NULL;
		}

		/* Check for the far side aborting the call or a network error
		 * occurring.  If this happens, save any packet that was under
		 * construction so that in the case of a network error, the
		 * call can be retried or redirected.
		 */
		if (call->state == RXRPC_CALL_COMPLETE) {
			ret = call->error;
			goto out;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}
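
/*
 * For illustration (example figures, not from this file): the buffer sizing
 * above rounds the payload up to the connection's crypto alignment and
 * reserves room for the security header.  Assuming size_align = 8 and
 * security_size = 8, a chunk of 1412 bytes gives
 * space = (1412 + 8) & ~7UL = 1416 and size = 1416 + 8 = 1424; if all 1412
 * bytes are copied in, the padding step computes
 * pad = (8 - (8 + 1412)) & 7 = 4, so the secured region of
 * 8 + 1412 + 4 = 1424 bytes is a whole number of 8-byte blocks.
 */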
/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				p->user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(p->abort_code))
				return -EINVAL;
			p->abort_code = *(unsigned int *)CMSG_DATA(cmsg);
			if (p->abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (p->command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			p->command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			p->exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_UPGRADE_SERVICE:
			p->upgrade = true;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_TX_LENGTH:
			if (p->tx_total_len != -1 || len != sizeof(__s64))
				return -EINVAL;
			p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
			if (p->tx_total_len < 0)
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
		return -EINVAL;
	return 0;
}
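
/*
 * For illustration only (a minimal userspace sketch; not part of this
 * file): the control buffer parsed above is built with the usual CMSG
 * macros.  Tagging a call with user ID 0x1234 and declaring an 8-byte
 * request up front might look like:
 *
 *	unsigned long user_id = 0x1234;
 *	__s64 tx_len = 8;
 *	char ctrl[CMSG_SPACE(sizeof(user_id)) + CMSG_SPACE(sizeof(tx_len))];
 *	struct msghdr msg = {
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_RXRPC;
 *	cm->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(user_id));
 *	memcpy(CMSG_DATA(cm), &user_id, sizeof(user_id));
 *
 *	cm = CMSG_NXTHDR(&msg, cm);
 *	cm->cmsg_level = SOL_RXRPC;
 *	cm->cmsg_type  = RXRPC_TX_LENGTH;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(tx_len));
 *	memcpy(CMSG_DATA(cm), &tx_len, sizeof(tx_len));
 *
 * RXRPC_USER_CALL_ID is mandatory, and RXRPC_TX_LENGTH may only accompany
 * a data send, as the final checks above enforce.
 */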
/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  struct rxrpc_send_params *p)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= rx->key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | p->exclusive;
	cp.upgrade		= p->upgrade;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
				     p->tx_total_len, GFP_KERNEL);
	/* The socket is now unlocked */

	_leave(" = %p\n", call);
	return call;
}
/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	enum rxrpc_call_state state;
	struct rxrpc_call *call;
	int ret;

	struct rxrpc_send_params p = {
		.tx_total_len	= -1,
		.user_call_ID	= 0,
		.abort_code	= 0,
		.command	= RXRPC_CMD_SEND_DATA,
		.exclusive	= false,
		.upgrade	= false,
	};

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &p);
	if (ret < 0)
		goto error_release_sock;

	if (p.command == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call, rxrpc_call_put);
		return 0;
	}

	call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (p.command != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}

		if (p.tx_total_len != -1) {
			ret = -EINVAL;
			if (call->tx_total_len != -1 ||
			    call->tx_pending ||
			    call->tx_top != 0)
				goto error_put;
			call->tx_total_len = p.tx_total_len;
		}
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (p.command == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (p.command != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len, NULL);
	}

	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}
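
/*
 * For illustration only (a userspace sketch; not part of this file): the
 * same user call ID selects the call on every subsequent sendmsg(), so a
 * request is streamed with MSG_MORE until its last fragment, e.g.
 *
 *	sendmsg(fd, &msg, MSG_MORE);	- first part of the request
 *	sendmsg(fd, &msg, 0);		- final part ends the Tx phase
 *
 * and an established call can be killed by attaching an RXRPC_ABORT
 * control message carrying a non-zero 32-bit abort code instead of data.
 * Only RXRPC_CMD_SEND_DATA ever reaches rxrpc_send_data(); the accept and
 * abort commands are handled entirely above.
 */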
/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 * @notify_end_tx: Notification that the last packet is queued.
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len,
			   rxrpc_notify_end_tx_t notify_end_tx)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
				      notify_end_tx);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
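
/*
 * For illustration only (a sketch of in-kernel use in the style of a
 * service such as kAFS; the buffer names and the 4.14-era iov_iter_kvec()
 * calling convention are assumptions, not part of this file):
 *
 *	struct msghdr msg;
 *	struct kvec iov = { .iov_base = buf, .iov_len = count };
 *
 *	memset(&msg, 0, sizeof(msg));
 *	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, count);
 *	msg.msg_flags = last ? 0 : MSG_MORE;
 *
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, count, NULL);
 *
 * Clearing MSG_MORE on the final fragment makes rxrpc_send_data() flag the
 * last DATA packet, ending the transmit phase as described above.
 */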
/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state
 * and return true if the call was aborted, false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
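
/*
 * For illustration only (a sketch; the abort code, error and tag are
 * example values): a kernel service typically calls this when it gives up
 * on a call, e.g.
 *
 *	rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT, -ECONNABORTED,
 *				"KWC");
 *
 * and uses the return value to tell whether an ABORT packet was actually
 * generated or the call had already completed.
 */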
/**
 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
 * @sock: The socket the call is on
 * @call: The call to be informed
 * @tx_total_len: The amount of data to be transmitted for this call
 *
 * Allow a kernel service to set the total transmit length on a call.  This
 * allows buffer-to-packet encrypt-and-copy to be performed.
 *
 * This function is primarily for use in setting the reply length since the
 * request length can be set when beginning the call.
 */
void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
				s64 tx_total_len)
{
	WARN_ON(call->tx_total_len != -1);
	call->tx_total_len = tx_total_len;
}
EXPORT_SYMBOL(rxrpc_kernel_set_tx_length);
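
/*
 * For illustration only (a sketch): a server-side service that knows the
 * size of its reply before transmitting can declare it once the request
 * has been received, e.g.
 *
 *	rxrpc_kernel_set_tx_length(sock, call, reply_size);
 *
 * after which the tx_total_len checks in rxrpc_send_data() verify each
 * subsequent send against the declared total.
 */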