/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
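
/*
 * Initialize a transport-allocated incoming message.  The incoming
 * starts out holding a single reference for the caller.
 */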
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
                  __be32 saddr)
{
        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = conn;
        inc->i_saddr = saddr;
        inc->i_rdma_cookie = 0;
        inc->i_rx_tstamp.tv_sec = 0;
        inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
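
/*
 * As above, but for multipath-capable transports that hand us the
 * specific connection path the message arrived on.
 */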
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
                       __be32 saddr)
{
        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = cp->cp_conn;
        inc->i_conn_path = cp;
        inc->i_saddr = saddr;
        inc->i_rdma_cookie = 0;
        inc->i_rx_tstamp.tv_sec = 0;
        inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
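
/*
 * Incomings are reference counted; dropping the last reference with
 * rds_inc_put() hands the incoming back to the transport through its
 * inc_free() hook.
 */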
static void rds_inc_addref(struct rds_incoming *inc)
{
        rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        atomic_inc(&inc->i_refcount);
}

void rds_inc_put(struct rds_incoming *inc)
{
        rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
        if (atomic_dec_and_test(&inc->i_refcount)) {
                BUG_ON(!list_empty(&inc->i_item));

                inc->i_conn->c_trans->inc_free(inc);
        }
}
EXPORT_SYMBOL_GPL(rds_inc_put);
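
/*
 * Account delta bytes of payload against the socket's receive buffer
 * and update the local congestion map: set our port's bit once queued
 * bytes exceed the rcvbuf limit, and only clear it again after the
 * queue drains below half the limit.  delta is positive as messages
 * are queued and negative as they are consumed or dropped.
 */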
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
                                  struct rds_cong_map *map,
                                  int delta, __be16 port)
{
        int now_congested;

        if (delta == 0)
                return;

        rs->rs_rcv_bytes += delta;
        now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

        rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
                 "now_cong %d delta %d\n",
                 rs, &rs->rs_bound_addr,
                 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
                 rds_sk_rcvbuf(rs), now_congested, delta);

        /* wasn't -> am congested */
        if (!rs->rs_congested && now_congested) {
                rs->rs_congested = 1;
                rds_cong_set_bit(map, port);
                rds_cong_queue_updates(map);
        }
        /* was -> aren't congested */
        /* Require more free space before reporting uncongested to prevent
           bouncing cong/uncong state too often */
        else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
                rs->rs_congested = 0;
                rds_cong_clear_bit(map, port);
                rds_cong_queue_updates(map);
        }

        /* do nothing if no change in cong state */
}

/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
        struct rds_header *hdr = &inc->i_hdr;
        unsigned int pos = 0, type, len;
        union {
                struct rds_ext_header_version version;
                struct rds_ext_header_rdma rdma;
                struct rds_ext_header_rdma_dest rdma_dest;
        } buffer;

        while (1) {
                len = sizeof(buffer);
                type = rds_message_next_extension(hdr, &pos, &buffer, &len);
                if (type == RDS_EXTHDR_NONE)
                        break;
                /* Process extension header here */
                switch (type) {
                case RDS_EXTHDR_RDMA:
                        rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
                        break;

                case RDS_EXTHDR_RDMA_DEST:
                        /* We ignore the size for now. We could stash it
                         * somewhere and use it for error checking. */
                        inc->i_rdma_cookie = rds_rdma_make_cookie(
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
                                        be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
                        break;
                }
        }
}
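
/*
 * Process the extension headers on a handshake probe.  Currently this
 * only looks for the peer's advertised number of paths, which bounds
 * how many paths the multipath connection will use.
 */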
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
                                struct rds_connection *conn)
{
        unsigned int pos = 0, type, len;
        union {
                struct rds_ext_header_version version;
                u16 rds_npaths;
        } buffer;

        while (1) {
                len = sizeof(buffer);
                type = rds_message_next_extension(hdr, &pos, &buffer, &len);
                if (type == RDS_EXTHDR_NONE)
                        break;
                /* Process extension header here */
                switch (type) {
                case RDS_EXTHDR_NPATHS:
                        conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
                                               buffer.rds_npaths);
                        break;
                default:
                        pr_warn_ratelimited("ignoring unknown exthdr type "
                                            "0x%x\n", type);
                }
        }
        /* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
        conn->c_npaths = max_t(int, conn->c_npaths, 1);
}

/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths.
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. when cp_index > 0, rds_connect_worker must only trigger
 *    a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp. will get sent out later
 *    when connection is completed.
 */
static void rds_start_mprds(struct rds_connection *conn)
{
        int i;
        struct rds_conn_path *cp;

        if (conn->c_npaths > 1 && conn->c_laddr < conn->c_faddr) {
                for (i = 1; i < conn->c_npaths; i++) {
                        cp = &conn->c_path[i];
                        rds_conn_path_connect_if_down(cp);
                }
        }
}

/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, who only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
                       struct rds_incoming *inc, gfp_t gfp)
{
        struct rds_sock *rs = NULL;
        struct sock *sk;
        unsigned long flags;
        struct rds_conn_path *cp;

        inc->i_conn = conn;
        inc->i_rx_jiffies = jiffies;
        if (conn->c_trans->t_mp_capable)
                cp = inc->i_conn_path;
        else
                cp = &conn->c_path[0];

        rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
                 "flags 0x%x rx_jiffies %lu\n", conn,
                 (unsigned long long)cp->cp_next_rx_seq,
                 inc,
                 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
                 be32_to_cpu(inc->i_hdr.h_len),
                 be16_to_cpu(inc->i_hdr.h_sport),
                 be16_to_cpu(inc->i_hdr.h_dport),
                 inc->i_hdr.h_flags,
                 inc->i_rx_jiffies);

        /*
         * Sequence numbers should only increase.  Messages get their
         * sequence number as they're queued in a sending conn.  They
         * can be dropped, though, if the sending socket is closed before
         * they hit the wire.  So sequence numbers can skip forward
         * under normal operation.  They can also drop back in the conn
         * failover case as previously sent messages are resent down the
         * new instance of a conn.  We drop those, otherwise we have
         * to assume that the next valid seq does not come after a
         * hole in the fragment stream.
         *
         * The headers don't give us a way to realize if fragments of
         * a message have been dropped.  We assume that frags that arrive
         * to a flow are part of the current message on the flow that is
         * being reassembled.  This means that senders can't drop messages
         * from the sending conn until all their frags are sent.
         *
         * XXX we could spend more on the wire to get more robust failure
         * detection, arguably worth it to avoid data corruption.
         */
        if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
            (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
                rds_stats_inc(s_recv_drop_old_seq);
                goto out;
        }
        cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

        if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
                if (inc->i_hdr.h_sport == 0) {
                        rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
                        goto out;
                }
                rds_stats_inc(s_recv_ping);
                rds_send_pong(cp, inc->i_hdr.h_sport);
                /* if this is a handshake ping, start multipath if necessary */
                if (RDS_HS_PROBE(inc->i_hdr.h_sport, inc->i_hdr.h_dport)) {
                        rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
                        rds_start_mprds(cp->cp_conn);
                }
                goto out;
        }

        if (inc->i_hdr.h_dport == RDS_FLAG_PROBE_PORT &&
            inc->i_hdr.h_sport == 0) {
                rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
                /* if this is a handshake pong, start multipath if necessary */
                rds_start_mprds(cp->cp_conn);
                wake_up(&cp->cp_conn->c_hs_waitq);
                goto out;
        }

        rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
        if (!rs) {
                rds_stats_inc(s_recv_drop_no_sock);
                goto out;
        }

        /* Process extension headers */
        rds_recv_incoming_exthdrs(inc, rs);

        /* We can be racing with rds_release() which marks the socket dead. */
        sk = rds_rs_to_sk(rs);

        /* serialize with rds_release -> sock_orphan */
        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!sock_flag(sk, SOCK_DEAD)) {
                rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
                rds_stats_inc(s_recv_queued);
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                if (sock_flag(sk, SOCK_RCVTSTAMP))
                        do_gettimeofday(&inc->i_rx_tstamp);
                rds_inc_addref(inc);
                list_add_tail(&inc->i_item, &rs->rs_recv_queue);
                __rds_wake_sk_sleep(sk);
        } else {
                rds_stats_inc(s_recv_drop_dead_sock);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
        if (rs)
                rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);

/*
 * be very careful here.  This is being called as the condition in
 * wait_event_*() needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
        unsigned long flags;

        if (!*inc) {
                read_lock_irqsave(&rs->rs_recv_lock, flags);
                if (!list_empty(&rs->rs_recv_queue)) {
                        *inc = list_entry(rs->rs_recv_queue.next,
                                          struct rds_incoming,
                                          i_item);
                        rds_inc_addref(*inc);
                }
                read_unlock_irqrestore(&rs->rs_recv_lock, flags);
        }

        return *inc != NULL;
}
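
/*
 * Returns whether inc is still on the socket's receive queue, i.e.
 * whether another recvmsg raced us to deliver it.  If drop is set,
 * the message is removed from the queue and its reference dropped.
 */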
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
                            int drop)
{
        struct sock *sk = rds_rs_to_sk(rs);
        int ret = 0;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        if (!list_empty(&inc->i_item)) {
                ret = 1;
                if (drop) {
                        /* XXX make sure this i_conn is reliable */
                        rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                              -be32_to_cpu(inc->i_hdr.h_len),
                                              inc->i_hdr.h_dport);
                        list_del_init(&inc->i_item);
                        rds_inc_put(inc);
                }
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);

        rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
        return ret;
}

/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
        struct rds_notifier *notifier;
        struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
        int err = 0;

        /* put_cmsg copies to user space and thus may sleep. We can't do this
         * with rs_lock held, so first grab as many notifications as we can stuff
         * in the user provided cmsg buffer. We don't try to copy more, to avoid
         * losing notifications - except when the buffer is so small that it wouldn't
         * even hold a single notification. Then we give him as much of this single
         * msg as we can squeeze in, and set MSG_CTRUNC.
         */
        if (msghdr) {
                max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
                if (!max_messages)
                        max_messages = 1;
        }

        spin_lock_irqsave(&rs->rs_lock, flags);
        while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
                notifier = list_entry(rs->rs_notify_queue.next,
                                      struct rds_notifier, n_list);
                list_move(&notifier->n_list, &copy);
                count++;
        }
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (!count)
                return 0;

        while (!list_empty(&copy)) {
                notifier = list_entry(copy.next, struct rds_notifier, n_list);

                if (msghdr) {
                        cmsg.user_token = notifier->n_user_token;
                        cmsg.status = notifier->n_status;

                        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
                                       sizeof(cmsg), &cmsg);
                        if (err)
                                break;
                }

                list_del_init(&notifier->n_list);
                kfree(notifier);
        }

        /* If we bailed out because of an error in put_cmsg,
         * we may be left with one or more notifications that we
         * didn't process. Return them to the head of the list. */
        if (!list_empty(&copy)) {
                spin_lock_irqsave(&rs->rs_lock, flags);
                list_splice(&copy, &rs->rs_notify_queue);
                spin_unlock_irqrestore(&rs->rs_lock, flags);
        }

        return err;
}

/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
        uint64_t notify = rs->rs_cong_notify;
        unsigned long flags;
        int err;

        err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
                       sizeof(notify), &notify);
        if (err)
                return err;

        spin_lock_irqsave(&rs->rs_lock, flags);
        rs->rs_cong_notify &= ~notify;
        spin_unlock_irqrestore(&rs->rs_lock, flags);

        return 0;
}

/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
                         struct rds_sock *rs)
{
        int ret = 0;

        if (inc->i_rdma_cookie) {
                ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
                               sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
                if (ret)
                        return ret;
        }

        if ((inc->i_rx_tstamp.tv_sec != 0) &&
            sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
                ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
                               sizeof(struct timeval),
                               &inc->i_rx_tstamp);
                if (ret)
                        return ret;
        }

        return 0;
}
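
/*
 * Deliver queued notifications, then copy the next incoming message to
 * the caller.  Pending RDMA-status and congestion notifications are
 * returned before any message payload.
 */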
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int msg_flags)
{
        struct sock *sk = sock->sk;
        struct rds_sock *rs = rds_sk_to_rs(sk);
        long timeo;
        int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct rds_incoming *inc = NULL;

        /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
        timeo = sock_rcvtimeo(sk, nonblock);

        rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

        if (msg_flags & MSG_OOB)
                goto out;

        while (1) {
                struct iov_iter save;
                /* If there are pending notifications, do those - and nothing else */
                if (!list_empty(&rs->rs_notify_queue)) {
                        ret = rds_notify_queue_get(rs, msg);
                        break;
                }

                if (rs->rs_cong_notify) {
                        ret = rds_notify_cong(rs, msg);
                        break;
                }

                if (!rds_next_incoming(rs, &inc)) {
                        if (nonblock) {
                                ret = -EAGAIN;
                                break;
                        }

                        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                                        (!list_empty(&rs->rs_notify_queue) ||
                                         rs->rs_cong_notify ||
                                         rds_next_incoming(rs, &inc)), timeo);
                        rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
                                 timeo);
                        if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
                                continue;

                        ret = timeo;
                        if (ret == 0)
                                ret = -ETIMEDOUT;
                        break;
                }

                rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
                         &inc->i_conn->c_faddr,
                         ntohs(inc->i_hdr.h_sport));
                save = msg->msg_iter;
                ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
                if (ret < 0)
                        break;

                /*
                 * if the message we just copied isn't at the head of the
                 * recv queue then someone else raced us to return it, try
                 * to get the next message.
                 */
                if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
                        rds_inc_put(inc);
                        inc = NULL;
                        rds_stats_inc(s_recv_deliver_raced);
                        msg->msg_iter = save;
                        continue;
                }

                if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
                        if (msg_flags & MSG_TRUNC)
                                ret = be32_to_cpu(inc->i_hdr.h_len);
                        msg->msg_flags |= MSG_TRUNC;
                }

                if (rds_cmsg_recv(inc, msg, rs)) {
                        ret = -EFAULT;
                        goto out;
                }

                rds_stats_inc(s_recv_delivered);

                if (sin) {
                        sin->sin_family = AF_INET;
                        sin->sin_port = inc->i_hdr.h_sport;
                        sin->sin_addr.s_addr = inc->i_saddr;
                        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                        msg->msg_namelen = sizeof(*sin);
                }
                break;
        }

        if (inc)
                rds_inc_put(inc);

out:
        return ret;
}

/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
        struct sock *sk = rds_rs_to_sk(rs);
        struct rds_incoming *inc, *tmp;
        unsigned long flags;

        write_lock_irqsave(&rs->rs_recv_lock, flags);
        list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
                rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
                                      -be32_to_cpu(inc->i_hdr.h_len),
                                      inc->i_hdr.h_dport);
                list_del_init(&inc->i_item);
                rds_inc_put(inc);
        }
        write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}

/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
                       struct rds_info_iterator *iter,
                       __be32 saddr, __be32 daddr, int flip)
{
        struct rds_info_message minfo;

        minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
        minfo.len = be32_to_cpu(inc->i_hdr.h_len);

        if (flip) {
                minfo.laddr = daddr;
                minfo.faddr = saddr;
                minfo.lport = inc->i_hdr.h_dport;
                minfo.fport = inc->i_hdr.h_sport;
        } else {
                minfo.laddr = saddr;
                minfo.faddr = daddr;
                minfo.lport = inc->i_hdr.h_sport;
                minfo.fport = inc->i_hdr.h_dport;
        }

        minfo.flags = 0;

        rds_info_copy(iter, &minfo, sizeof(minfo));
}