// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct rxrpc_ack_buffer {
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	u8 acks[255];
	u8 pad[3];
	struct rxrpc_ackinfo ackinfo;
};
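
/* Illustrative note: in rxrpc_send_ack_packet() below, iov[0] covers whdr,
 * ack and the variable number of soft-ACK bytes written into acks[] (plus
 * three bytes of padding), while iov[1] points at ackinfo, so the trailer
 * always follows the variable-length ACK body on the wire.
 */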

struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		u16 tx_backoff = READ_ONCE(call->tx_backoff);

		if (tx_backoff < HZ)
			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
	} else {
		WRITE_ONCE(call->tx_backoff, 0);
	}
}
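
/* Example (illustrative): five consecutive failed sends leave tx_backoff at
 * 5 jiffies, which the call timer code can fold into its retransmission
 * delay; the increment stops once tx_backoff reaches HZ, and a single
 * successful send resets it to zero.
 */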

/*
 * Arrange for a keepalive ping a certain time after we last transmitted.  This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

	keepalive_at += now;
	WRITE_ONCE(call->keepalive_at, keepalive_at);
	rxrpc_reduce_call_timer(call, keepalive_at, now,
				rxrpc_timer_set_for_keepalive);
}
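
/* Worked example (hypothetical value): with next_rx_timo = 12 * HZ, the
 * keepalive ping is scheduled for jiffies + 2 * HZ, i.e. a sixth of the
 * expected-Rx timeout after the last transmission.
 */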

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace	= htons(8);
	pkt->ack.maxSkew	= htons(0);
	pkt->ack.firstPacket	= htonl(hard_ack + 1);
	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
	pkt->ack.serial		= htonl(serial);
	pkt->ack.reason		= reason;
	pkt->ack.nAcks		= top - hard_ack;

	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	mtu = conn->params.peer->if_mtu;
	mtu -= conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU	= htonl(mtu);
	pkt->ackinfo.rwind	= htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max	= htonl(jmax);

	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}
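
/* Worked example (illustrative): with hard_ack = 5 and top = 8, the loop
 * above emits one ACK/NACK byte for each of seq 6..8, so nAcks = 3 and the
 * function returns 6: three soft-ACK bytes plus three zero pad bytes, i.e.
 * the number of bytes following the fixed ackpacket header in iov[0].
 */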

/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
			  rxrpc_serial_t *_serial)
{
	struct rxrpc_connection *conn;
	struct rxrpc_ack_buffer *pkt;
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	size_t len, n;
	int ret;
	u8 reason;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt->whdr.epoch		= htonl(conn->proto.epoch);
	pkt->whdr.cid		= htonl(call->cid);
	pkt->whdr.callNumber	= htonl(call->call_id);
	pkt->whdr.seq		= 0;
	pkt->whdr.type		= RXRPC_PACKET_TYPE_ACK;
	pkt->whdr.flags		= RXRPC_SLOW_START_OK | conn->out_clientflag;
	pkt->whdr.userStatus	= 0;
	pkt->whdr.securityIndex	= call->security_ix;
	pkt->whdr._rsvd		= 0;
	pkt->whdr.serviceId	= htons(call->service_id);

	spin_lock_bh(&call->lock);
	if (ping) {
		reason = RXRPC_ACK_PING;
	} else {
		reason = call->ackr_reason;
		if (!call->ackr_reason) {
			spin_unlock_bh(&call->lock);
			ret = 0;
			goto out;
		}
		call->ackr_reason = 0;
	}
	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

	spin_unlock_bh(&call->lock);

	iov[0].iov_base	= pkt;
	iov[0].iov_len	= sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
	iov[1].iov_base = &pkt->ackinfo;
	iov[1].iov_len	= sizeof(pkt->ackinfo);
	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	pkt->whdr.serial = htonl(serial);
	trace_rxrpc_tx_ack(call->debug_id, serial,
			   ntohl(pkt->ack.firstPacket),
			   ntohl(pkt->ack.serial),
			   pkt->ack.reason, pkt->ack.nAcks);
	if (_serial)
		*_serial = serial;

	if (ping) {
		call->ping_serial = serial;
		smp_wmb();
		/* We need to stick a time in before we send the packet in case
		 * the reply gets back before kernel_sendmsg() completes - but
		 * asking UDP to send the packet can take a relatively long
		 * time.
		 */
		call->ping_time = ktime_get_real();
		set_bit(RXRPC_CALL_PINGING, &call->flags);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
				      rxrpc_tx_point_call_ack);
	rxrpc_tx_backoff(call, ret);

	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0) {
			if (ping)
				clear_bit(RXRPC_CALL_PINGING, &call->flags);
			rxrpc_propose_ACK(call, pkt->ack.reason,
					  ntohl(pkt->ack.serial),
					  false, true,
					  rxrpc_propose_ack_retry_tx);
		} else {
			spin_lock_bh(&call->lock);
			if (after(hard_ack, call->ackr_consumed))
				call->ackr_consumed = hard_ack;
			if (after(top, call->ackr_seen))
				call->ackr_seen = top;
			spin_unlock_bh(&call->lock);
		}

		rxrpc_set_keepalive(call);
	}

out:
	kfree(pkt);
	return ret;
}
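
/* Minimal usage sketch (illustrative; variable names are hypothetical):
 *
 *	rxrpc_serial_t ping_serial;
 *	int ret = rxrpc_send_ack_packet(call, true, &ping_serial);
 *
 * sends a ping ACK and reports its serial so the caller can match the
 * response for RTT sampling; a non-ping caller passes ping = false and may
 * pass a NULL serial pointer, in which case whatever ackr_reason has
 * accrued on the call determines the ACK that is sent.
 */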

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data.  After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt.whdr.epoch		= htonl(conn->proto.epoch);
	pkt.whdr.cid		= htonl(call->cid);
	pkt.whdr.callNumber	= htonl(call->call_id);
	pkt.whdr.seq		= 0;
	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags		= conn->out_clientflag;
	pkt.whdr.userStatus	= 0;
	pkt.whdr.securityIndex	= call->security_ix;
	pkt.whdr._rsvd		= 0;
	pkt.whdr.serviceId	= htons(call->service_id);
	pkt.abort_code		= htonl(call->abort_code);

	iov[0].iov_base	= &pkt;
	iov[0].iov_len	= sizeof(pkt);

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	ret = kernel_sendmsg(conn->params.local->socket,
			     &msg, iov, 1, sizeof(pkt));
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}
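
/* Illustrative note: the ABORT wire image is just the fixed wire header
 * followed by a 4-byte network-order abort code, so a single kvec of
 * sizeof(pkt) bytes suffices; e.g. an abort_code of RX_USER_ABORT goes on
 * the wire as htonl(RX_USER_ABORT) directly after the header.
 */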

/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
			   bool retrans)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	size_t len;
	int ret, opt;

	_enter(",{%d}", skb->len);

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);

	whdr.epoch	= htonl(conn->proto.epoch);
	whdr.cid	= htonl(call->cid);
	whdr.callNumber	= htonl(call->call_id);
	whdr.seq	= htonl(sp->hdr.seq);
	whdr.serial	= htonl(serial);
	whdr.type	= RXRPC_PACKET_TYPE_DATA;
	whdr.flags	= sp->hdr.flags;
	whdr.userStatus	= 0;
	whdr.securityIndex = call->security_ix;
	whdr._rsvd	= htons(sp->hdr._rsvd);
	whdr.serviceId	= htons(call->service_id);

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    sp->hdr.seq == 1)
		whdr.userStatus	= RXRPC_USERSTATUS_SERVICE_UPGRADE;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = skb->head;
	iov[1].iov_len = skb->len;
	len = iov[0].iov_len + iov[1].iov_len;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
	     rxrpc_to_server(sp)
	     ) &&
	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
	     retrans ||
	     call->cong_mode == RXRPC_CALL_SLOW_START ||
	     (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
			  ktime_get_real())))
		whdr.flags |= RXRPC_REQUEST_ACK;
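
	/* Example (illustrative): while fewer than three RTT samples have
	 * been gathered, every odd-numbered DATA packet (seq 1, 3, 5, ...)
	 * carries RXRPC_REQUEST_ACK so the response can be timed; once the
	 * RTT cache is warm, ACKs are only requested for apparent loss,
	 * retransmission, slow-start or a peer not probed in the last
	 * second.
	 */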

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;

		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
					    whdr.flags, retrans, true);
			goto done;
		}
	}

	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
			    false);

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (iov[1].iov_len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();

	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and updated the peer record
	 */
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();

	up_read(&conn->params.local->defrag_sem);
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_nofrag);
	else
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_nofrag);
	rxrpc_tx_backoff(call, ret);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	if (ret >= 0) {
		if (whdr.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = skb->tstamp;
			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
			if (call->peer->rtt_count > 1) {
				unsigned long nowj = jiffies, ack_lost_at;

				ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
				ack_lost_at += nowj;
				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
							rxrpc_timer_set_for_lost_ack);
			}
		}

		if (sp->hdr.seq == 1 &&
		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
				      &call->flags)) {
			unsigned long nowj = jiffies, expect_rx_by;

			expect_rx_by = nowj + call->next_rx_timo;
			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
						rxrpc_timer_set_for_normal);
		}

		rxrpc_set_keepalive(call);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon.  The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();

	switch (conn->params.local->srx.transport.family) {
	case AF_INET6:
	case AF_INET:
		opt = IP_PMTUDISC_DONT;
		kernel_setsockopt(conn->params.local->socket,
				  SOL_IP, IP_MTU_DISCOVER,
				  (char *)&opt, sizeof(opt));
		ret = kernel_sendmsg(conn->params.local->socket, &msg,
				     iov, 2, len);
		conn->params.peer->last_tx_at = ktime_get_seconds();

		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(conn->params.local->socket,
				  SOL_IP, IP_MTU_DISCOVER,
				  (char *)&opt, sizeof(opt));
		break;

	default:
		BUG();
	}
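
	/* Design note: IP_PMTUDISC_DONT above clears the DF bit just for this
	 * sendmsg so the stack may fragment the oversized packet, after which
	 * IP_PMTUDISC_DO restores path-MTU probing for subsequent packets.
	 */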

	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_frag);
	else
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_frag);
	rxrpc_tx_backoff(call, ret);

	up_write(&conn->params.local->defrag_sem);
	goto done;
}

/*
 * reject packets through the local endpoint
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	_enter("%d", local->debug_id);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	while ((skb = skb_dequeue(&local->reject_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		sp = rxrpc_skb(skb);

		switch (skb->mark) {
		case RXRPC_SKB_MARK_REJECT_BUSY:
			whdr.type = RXRPC_PACKET_TYPE_BUSY;
			size = sizeof(whdr);
			ioc = 1;
			break;
		case RXRPC_SKB_MARK_REJECT_ABORT:
			whdr.type = RXRPC_PACKET_TYPE_ABORT;
			code = htonl(skb->priority);
			size = sizeof(whdr) + sizeof(code);
			ioc = 2;
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			continue;
		}

		if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
			msg.msg_namelen = srx.transport_len;

			whdr.epoch	= htonl(sp->hdr.epoch);
			whdr.cid	= htonl(sp->hdr.cid);
			whdr.callNumber	= htonl(sp->hdr.callNumber);
			whdr.serviceId	= htons(sp->hdr.serviceId);
			whdr.flags	= sp->hdr.flags;
			whdr.flags	^= RXRPC_CLIENT_INITIATED;
			whdr.flags	&= RXRPC_CLIENT_INITIATED;
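
			/* Illustrative: the XOR/AND pair above flips the
			 * client-initiated bit and clears all other flags, so
			 * a rejection of a client packet goes out with no
			 * flags set, while a rejection of a server packet
			 * goes out flagged as client-initiated.
			 */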

			ret = kernel_sendmsg(local->socket, &msg,
					     iov, ioc, size);
			if (ret < 0)
				trace_rxrpc_tx_fail(local->debug_id, 0, ret,
						    rxrpc_tx_point_reject);
			else
				trace_rxrpc_tx_packet(local->debug_id, &whdr,
						      rxrpc_tx_point_reject);
		}

		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}

	_leave("");
}

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name	= &peer->srx.transport;
	msg.msg_namelen	= peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(peer->local->rxnet->epoch);
	whdr.cid	= 0;
	whdr.callNumber	= 0;
	whdr.seq	= 0;
	whdr.serial	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags	= RXRPC_LAST_PACKET;
	whdr.userStatus	= 0;
	whdr.securityIndex = 0;
	whdr._rsvd	= 0;
	whdr.serviceId	= 0;

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= (char *)rxrpc_keepalive_string;
	iov[1].iov_len	= sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	_proto("Tx VERSION (keepalive)");

	ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}