/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr)
{
	int i;

	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = *saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;

	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
		inc->i_rx_lat_trace[i] = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       struct in6_addr *saddr)
{
	refcount_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = *saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	refcount_inc(&inc->i_refcount);
}
void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
	if (refcount_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	if (delta > 0)
		rds_stats_add(s_recv_bytes_added_to_socket, delta);
	else
		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
		 "now_cong %d delta %d\n",
		 rs, &rs->rs_bound_addr,
		 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
		 rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}
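/*
 * Worked example of the hysteresis above.  Illustrative sketch only, not
 * part of the build; the 64K rcvbuf is an assumed value, not a kernel
 * default:
 *
 *	rds_sk_rcvbuf(rs) == 64 * 1024;
 *
 *	rs->rs_rcv_bytes = 70 * 1024;	// > 64K: set cong bit, queue update
 *	rs->rs_rcv_bytes = 50 * 1024;	// still >= 32K: stays congested
 *	rs->rs_rcv_bytes = 20 * 1024;	// < 64K / 2: clear bit, queue update
 *
 * Requiring the queue to drain below half of rds_sk_rcvbuf() before
 * reporting uncongested keeps a socket hovering near its limit from
 * flapping the congestion map on every send/recv pair.
 */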
static void rds_conn_peer_gen_update(struct rds_connection *conn,
				     u32 peer_gen_num)
{
	int i;
	struct rds_message *rm, *tmp;
	unsigned long flags;

	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
	if (peer_gen_num != 0) {
		if (conn->c_peer_gen_num != 0 &&
		    peer_gen_num != conn->c_peer_gen_num) {
			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
				struct rds_conn_path *cp;

				cp = &conn->c_path[i];
				spin_lock_irqsave(&cp->cp_lock, flags);
				cp->cp_next_tx_seq = 1;
				cp->cp_next_rx_seq = 0;
				list_for_each_entry_safe(rm, tmp,
							 &cp->cp_retrans,
							 m_conn_item) {
					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
				}
				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
		conn->c_peer_gen_num = peer_gen_num;
	}
}
/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}
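/*
 * The rdma_dest cookie stashed above is later handed to userspace via
 * RDS_CMSG_RDMA_DEST (see rds_cmsg_recv()).  A minimal sketch of the
 * cookie layout, assuming the rds.h helpers pack the r_key into the low
 * 32 bits and the offset into the high 32 bits (illustrative only, not
 * part of the build):
 *
 *	u64 cookie = (u64)offset << 32 | r_key;
 *	u32 key    = cookie & 0xffffffff;	// rds_rdma_cookie_key()
 *	u32 off    = cookie >> 32;		// rds_rdma_cookie_offset()
 */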
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
		u32 rds_gen_num;
	} buffer;
	u32 new_peer_gen_num = 0;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       be16_to_cpu(buffer.rds_npaths));
			break;
		case RDS_EXTHDR_GEN_NUM:
			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type "
					    "0x%x\n", type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
	conn->c_ping_triggered = 0;
	rds_conn_peer_gen_update(conn, new_peer_gen_num);
}
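/*
 * Worked example of the negotiation above: if the probe carries
 * RDS_EXTHDR_NPATHS == 16 while the local compile-time ceiling
 * RDS_MPATH_WORKERS is 8, then
 *
 *	conn->c_npaths = min_t(int, 8, 16) = 8;
 *
 * and if the peer predates multipath and sends no RDS_EXTHDR_NPATHS at
 * all, c_npaths is still 0 after the loop and the max_t() clamps it to
 * 1, i.e. the connection stays single-path.  (The 16 is illustrative.)
 */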
/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths.
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp. will get sent out later.
 *    when connection is completed.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 &&
	    rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
		for (i = 0; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}
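/*
 * Walk-through of the rules above with illustrative addresses: host A
 * (10.0.0.1) probe-pings host B (10.0.0.2) with s_npaths = 8; B pongs
 * back r_npaths = 8, so both sides end up with c_npaths = 8.  Only A,
 * whose address compares lower in rds_addr_cmp(), walks c_path[0..7]
 * here calling rds_conn_path_connect_if_down(); B's paths come up
 * passively as A's connection attempts arrive, which is what rule 4
 * enforces in rds_connect_worker().
 */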
/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, who only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from %pI6c\n",
				 saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
				 be16_to_cpu(inc->i_hdr.h_dport))) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}

	if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
/*
 * be very careful here.  This is being called as the condition in
 * wait_event_*() needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}
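/*
 * rds_recvmsg() uses this function directly as a wait_event condition,
 * roughly (sketch of the call that appears in rds_recvmsg() below):
 *
 *	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 *			(!list_empty(&rs->rs_notify_queue) ||
 *			 rs->rs_cong_notify ||
 *			 rds_next_incoming(rs, &inc)), timeo);
 *
 * The condition is re-evaluated on every wakeup, so the "if (!*inc)"
 * guard above is what keeps repeated calls from taking more than one
 * reference on the same incoming message.
 */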
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}
/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can stuff
	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
	 * losing notifications - except when the buffer is so small that it wouldn't
	 * even hold a single notification. Then we give him as much of this single
	 * msg as we can squeeze in, and set MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}
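/*
 * Example of the batching math above (illustrative sizes): with a
 * control buffer of msg_controllen == 3 * CMSG_SPACE(sizeof(cmsg)),
 * max_messages is 3, so at most three notifications are moved off
 * rs_notify_queue per call.  A buffer smaller than one CMSG_SPACE()
 * still gets max_messages forced to 1, so a lone truncated entry can be
 * returned with MSG_CTRUNC rather than nothing at all.
 */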
/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
			sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}
/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			goto out;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			goto out;
	}

	if (rs->rs_rx_traces) {
		struct rds_cmsg_rx_trace t;
		int i, j;

		memset(&t, 0, sizeof(t));
		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
			j = rs->rs_rx_trace[i];
			t.rx_trace_pos[i] = j;
			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
					inc->i_rx_lat_trace[j];
		}

		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
			       sizeof(t), &t);
		if (ret)
			goto out;
	}

out:
	return ret;
}
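/*
 * How a userspace receiver might consume the cmsgs emitted above.  This
 * is a hedged sketch, not kernel code: error handling is elided and fd
 * is assumed to be an already-bound RDS socket.
 *
 *	uint64_t cookie;
 *	struct timeval tv;
 *	char data[1024], cbuf[CMSG_SPACE(sizeof(tv)) + 64];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &mh, 0);
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c)) {
 *		if (c->cmsg_level == SOL_RDS &&
 *		    c->cmsg_type == RDS_CMSG_RDMA_DEST)
 *			memcpy(&cookie, CMSG_DATA(c), sizeof(cookie));
 *		else if (c->cmsg_level == SOL_SOCKET &&
 *			 c->cmsg_type == SCM_TIMESTAMP)
 *			memcpy(&tv, CMSG_DATA(c), sizeof(tv));
 *	}
 */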
static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
{
	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
	struct rds_msg_zcopy_info *info = NULL;
	struct rds_zcopy_cookies *done;
	unsigned long flags;

	if (!msg->msg_control)
		return false;

	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
		return false;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->zcookie_head)) {
		info = list_entry(q->zcookie_head.next,
				  struct rds_msg_zcopy_info, rs_zcookie_next);
		list_del(&info->rs_zcookie_next);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	if (!info)
		return false;
	done = &info->zcookies;
	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
		     done)) {
		spin_lock_irqsave(&q->lock, flags);
		list_add(&info->rs_zcookie_next, &q->zcookie_head);
		spin_unlock_irqrestore(&q->lock, flags);
		return false;
	}
	kfree(info);
	return true;
}
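/*
 * The cookies reaped here complete an earlier zerocopy sendmsg().  A
 * hedged sketch of the userspace side, assuming the uapi layout of
 * struct rds_zcopy_cookies (a __u32 num followed by a cookie array) and
 * reusing the cmsg loop shown after rds_cmsg_recv() above:
 *
 *	if (c->cmsg_level == SOL_RDS &&
 *	    c->cmsg_type == RDS_CMSG_ZCOPY_COMPLETION) {
 *		struct rds_zcopy_cookies *done =
 *			(struct rds_zcopy_cookies *)CMSG_DATA(c);
 *		for (i = 0; i < done->num; i++)
 *			buf_done(done->cookies[i]);	// hypothetical app hook
 *	}
 *
 * buf_done() stands in for whatever the application uses to mark a
 * send buffer reusable; it is not an RDS interface.
 */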
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;
	if (msg_flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);

	while (1) {
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				bool reaped = rds_recvmsg_zcookie(rs, msg);

				ret = reaped ? 0 : -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}

		rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			iov_iter_revert(&msg->msg_iter, ret);
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}
		rds_recvmsg_zcookie(rs, msg);

		rds_stats_inc(s_recv_delivered);

		if (msg->msg_name) {
			if (ipv6_addr_v4mapped(&inc->i_saddr)) {
				sin = (struct sockaddr_in *)msg->msg_name;

				sin->sin_family = AF_INET;
				sin->sin_port = inc->i_hdr.h_sport;
				sin->sin_addr.s_addr =
				    inc->i_saddr.s6_addr32[3];
				memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
				msg->msg_namelen = sizeof(*sin);
			} else {
				sin6 = (struct sockaddr_in6 *)msg->msg_name;

				sin6->sin6_family = AF_INET6;
				sin6->sin6_port = inc->i_hdr.h_sport;
				sin6->sin6_addr = inc->i_saddr;
				sin6->sin6_flowinfo = 0;
				sin6->sin6_scope_id = rs->rs_bound_scope_id;
				msg->msg_namelen = sizeof(*sin6);
			}
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}
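/*
 * Minimal userspace receive path against this entry point.  Hedged
 * sketch only: error handling is elided, and 10.0.0.1 stands in for a
 * local address on an RDS-capable interface (RDS requires binding to a
 * specific local address, not INADDR_ANY).
 *
 *	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4000),
 *		.sin_addr.s_addr = inet_addr("10.0.0.1"),
 *	};
 *	char buf[1024];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	recvmsg(fd, &mh, 0);	// blocks per the socket rcvtimeo, as above
 */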
/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}
#if IS_ENABLED(CONFIG_IPV6)
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip)
{
	struct rds6_info_message minfo6;

	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
	minfo6.tos = inc->i_conn->c_tos;

	if (flip) {
		minfo6.laddr = *daddr;
		minfo6.faddr = *saddr;
		minfo6.lport = inc->i_hdr.h_dport;
		minfo6.fport = inc->i_hdr.h_sport;
	} else {
		minfo6.laddr = *saddr;
		minfo6.faddr = *daddr;
		minfo6.lport = inc->i_hdr.h_sport;
		minfo6.fport = inc->i_hdr.h_dport;
	}

	minfo6.flags = 0;

	rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif