/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}
/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}
/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
/*
 * Connection established.
 * We get here for both outgoing and incoming connection.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;
	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					     dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}
	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
		       " no longer supported\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version));
		rds_conn_destroy(conn);
		return;
	} else {
		printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version),
		       ic->i_flowctl ? ", flow control" : "");
	}
	/*
	 * Init rings and fill recv. this needs to wait until protocol negotiation
	 * is complete, since ring layout is different from 3.0 to 3.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);
	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);
	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* dp structure start is not guaranteed to be 8 bytes aligned.
		 * Since dp_ack_seq is 64-bit extended load operations can be
		 * used so go through get_unaligned to avoid unaligned errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      struct rds_ib_connect_private *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));
	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
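	/* An RNR retry count of 7 is the IB "retry forever" value - see the
	 * comment above rds_ib_tune_rnr(). */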
	conn_param->rnr_retry_count = 7;
	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;
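			/* i_credits packs the send-credit and posted-buffer
			 * counts into a single atomic; IB_GET_POST_CREDITS
			 * extracts the posted half and IB_SET_POST_CREDITS
			 * builds the value to subtract, so credits advertised
			 * here are handed out only once. */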
			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}
		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}
/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring. Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);
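	/* Drain the CQ, re-arm the completion notification, then drain again:
	 * completions that slipped in between the first drain and the re-arm
	 * would otherwise sit unprocessed until the next interrupt. */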
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
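	/* Completions may have freed ring space; kick the transmit path if
	 * the connection is up and either a sender backed off on a full ring
	 * (RDS_LL_SEND_FULL) or a congestion map is queued. */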
	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	     test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	memset(&state, 0, sizeof(state));
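	/* Same drain / re-arm / drain pattern as the send side, except the
	 * recv CQ is armed for solicited completions only, and per-WC ACK
	 * state accumulates in 'state' to be applied below. */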
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
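	/* Apply the accumulated ACK state: record the next ACK we owe the
	 * peer, and retire sends the peer has now acknowledged. */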
	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));
	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
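	/* Every other QP event is treated as fatal: drop the connection and
	 * let the reconnect machinery rebuild it. */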
340 rdsdebug("Fatal QP Event %u (%s) "
341 "- connection %pI4->%pI4, reconnecting\n",
342 event
->event
, ib_event_msg(event
->event
),
343 &conn
->c_laddr
, &conn
->c_faddr
);
static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}
/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;
	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * completion queue and send queue. This extra space is used for FRMR
	 * registration and invalidation work requests
	 */
	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);
	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;

	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}
	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}
	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
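	/* i_fastreg_wrs is the budget of outstanding fast-registration work
	 * requests; shutdown waits for it to climb back to
	 * RDS_IB_DEFAULT_FR_WR before tearing the connection down. */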
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);

	/*
	 * XXX this can fail if max_*_wr is too large? Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}
	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto qp_out;
	}
	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto send_hdrs_dma_out;
	}
	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto recv_hdrs_dma_out;
	}
	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}
	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	return ret;
sends_out:
	vfree(ic->i_sends);
ack_dma_out:
	ib_dma_free_coherent(dev, sizeof(struct rds_header),
			     ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
			     sizeof(struct rds_header),
			     ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
			     sizeof(struct rds_header),
			     ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	if (!ib_destroy_cq(ic->i_recv_cq))
		ic->i_recv_cq = NULL;
send_cq_out:
	if (!ib_destroy_cq(ic->i_send_cq))
		ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);
	rds_ib_dev_put(rds_ibdev);

	return ret;
}
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */
	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
		       "rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof(*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;
	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
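	/* dp_protocol_minor_mask is a bitmap of minor versions the peer
	 * supports, with bit 0 standing for minor 0; the position of the
	 * highest bit both sides share becomes the negotiated minor. */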
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				   &dp->dp_saddr,
				   dp->dp_protocol_major,
				   dp->dp_protocol_minor);
	return version;
}
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;
598 rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
599 "0x%llx\n", &dp
->dp_saddr
, &dp
->dp_daddr
,
600 RDS_PROTOCOL_MAJOR(version
), RDS_PROTOCOL_MINOR(version
),
601 (unsigned long long)be64_to_cpu(lguid
),
602 (unsigned long long)be64_to_cpu(fguid
));
	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}
	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}
	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;
	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth);
	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	if (err)
		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */
	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
				  UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	return ret;
}
int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}
732 rdsdebug("created cm id %p for conn %p\n", ic
->i_cm_id
, conn
);
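	/* Only the destination uses the well-known RDS_PORT; the source port
	 * of 0 lets the stack pick any local port. */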
	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);
	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}
/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);
	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;
		int err;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}
		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it. We've shutdown new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);
		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);
		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);
	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}
	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;
	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
}
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}
	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}
/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}
/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}