/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}
/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}
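
/* Note on the helper above: the credit value comes from the peer's
 * dp_credit field in the RDS connect private data (see
 * rds_ib_cm_connect_complete() and rds_ib_cm_handle_connect() below).
 * A zero credit value, or the rds_ib_sysctl_flow_control knob being
 * off, disables credit-based flow control for this connection.
 */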
/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
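
/* Note: IB_RNR_TIMER_000_32 encodes an RNR NAK timer of roughly
 * 0.32 ms (the enum names carry the value, e.g. IB_RNR_TIMER_655_36
 * is 655.36 ms) - this is the "rather low timeout" referred to above.
 */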
/*
 * Connection established.
 * We get here for both outgoing and incoming connection.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					     dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
		       " no longer supported\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version));
		rds_conn_destroy(conn);
		return;
	} else {
		printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version),
		       ic->i_flowctl ? ", flow control" : "");
	}

	/*
	 * Init rings and fill recv. This needs to wait until protocol
	 * negotiation is complete, since ring layout is different
	 * from 3.0 to 3.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* dp structure start is not guaranteed to be 8 bytes aligned.
		 * Since dp_ack_seq is 64-bit extended load operations can be
		 * used so go through get_unaligned to avoid unaligned errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      struct rds_ib_connect_private *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}
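
/* Note on the credit advertisement above (assuming the helper layout in
 * ib.h): ic->i_credits packs the send and post credit counts into one
 * atomic, with IB_GET_POST_CREDITS/IB_SET_POST_CREDITS shifting the
 * posted-buffer count in and out of the upper half. The atomic_sub
 * therefore removes exactly the credits that were just advertised to
 * the peer in dp_credit.
 */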
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}
/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring. Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}
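
/* Dispatch note for poll_scq() above: wr_ids at or below the send ring
 * size identify ordinary send completions, the reserved RDS_IB_ACK_WR_ID
 * marks the single ack work request, and anything else is treated as a
 * fast-registration (FRMR) completion.
 */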
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	     test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}
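
/* The poll / ib_req_notify_cq / poll sequence above is the usual way to
 * avoid missed completions: a completion that arrives between the first
 * drain and re-arming the CQ would generate no event, so the CQ is
 * polled once more after it has been re-armed.
 */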
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}
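
/* The recv tasklet above mirrors the send side, with two extras: it
 * accumulates ack state across all polled completions (so only the
 * final ack_next/ack_recv values are acted on), and it drops the
 * connection if the underlying rds_ib_device has gone away.
 */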
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) "
			 "- connection %pI4->%pI4, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}
static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}
/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * completion queue and send queue. This extra space is used for FRMR
	 * registration and invalidation work requests.
	 */
	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;

	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	cq_attr.cqe = ic->i_recv_ring.w_nr;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);

	/*
	 * XXX this can fail if max_*_wr is too large? Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	rds_ib_dev_put(rds_ibdev);
	return ret;
}
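
/* Note on error handling in rds_ib_setup_qp(): partially built state is
 * left in place on failure (only IS_ERR values are scrubbed back to
 * NULL, per the comment above the function); rds_ib_conn_path_shutdown()
 * below is written to cope with that and tears down whatever subset was
 * actually created.
 */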
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
		       "rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof(*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				   &dp->dp_saddr,
				   dp->dp_protocol_major,
				   dp->dp_protocol_minor);
	return version;
}
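
/* Negotiation note: dp_protocol_minor_mask carries one bit per supported
 * minor version (bit n for minor n), so the shift loop above selects the
 * highest minor version common to both peers.
 */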
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
		 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	if (err)
		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}
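
/* Return-value note: rds_ib_cm_handle_connect() returns "destroy", and a
 * non-zero return tells the rdma_cm core to destroy the cm_id. Once the
 * cm_id has been attached to the rds_ib_connection (destroy = 0),
 * teardown must instead go through the connection shutdown path so we
 * don't free a cm_id we still "own".
 */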
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
				  UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	return ret;
}
int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}
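
/* Note: rdma_resolve_addr() only kicks off asynchronous address
 * resolution here; the rest of the active connect (route resolution and
 * the eventual rdma_connect() via rds_ib_cm_initiate_connect()) is
 * driven by events delivered to rds_rdma_cm_event_handler.
 */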
/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it. We've shutdown new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
}
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}
/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}
/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}