net/rds/iw_cm.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"

/*
 * Set the selected protocol version
 */
static void rds_iw_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_iw_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (rds_iw_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_iw_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_iw_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_iw_connect_private *dp = NULL;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_device *rds_iwdev;
	int err;

	if (event->param.conn.private_data_len) {
		dp = event->param.conn.private_data;

		rds_iw_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					dp->dp_protocol_minor));
		rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
	}

	/* update ib_device with this local ipaddr & conn */
	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
	err = rds_iw_update_cm_id(rds_iwdev, ic->i_cm_id);
	if (err)
		printk(KERN_ERR "rds_iw_update_ipaddr failed (%d)\n", err);
	rds_iw_add_conn(rds_iwdev, conn);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp && dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	printk(KERN_NOTICE "RDS/IW: connected to %pI4<->%pI4 version %u.%u%s\n",
			&conn->c_laddr, &conn->c_faddr,
			RDS_PROTOCOL_MAJOR(conn->c_version),
			RDS_PROTOCOL_MINOR(conn->c_version),
			ic->i_flowctl ? ", flow control" : "");

	rds_connect_complete(conn);
}

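/*
 * Fill in the rdma_conn_param and the RDS private data (protocol
 * version, credits, piggybacked ACK sequence) that we exchange
 * during connection setup.
 */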
static void rds_iw_cm_fill_conn_param(struct rds_connection *conn,
			struct rdma_conn_param *conn_param,
			struct rds_iw_connect_private *dp,
			u32 protocol_version)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));
	/* XXX tune these? */
	conn_param->responder_resources = 1;
	conn_param->initiator_depth = 1;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IW_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = rds_iw_piggyb_ack(ic);

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

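/*
 * Asynchronous CQ event handler - these events are only logged;
 * actual completions are processed by the send/recv completion
 * handlers passed to rds_iw_init_qp_attrs().
 */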
static void rds_iw_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u data %p\n", event->event, data);
}

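/*
 * Asynchronous QP event handler. COMM_EST is forwarded to the rdma_cm;
 * anything else is treated as fatal and drops the connection.
 */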
static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_iw_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_FATAL:
	default:
		rdsdebug("Fatal QP Event %u "
			"- connection %pI4->%pI4, reconnecting\n",
			event->event, &conn->c_laddr,
			&conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

/*
 * Create a QP
 */
static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
		struct rds_iw_device *rds_iwdev,
		struct rds_iw_work_ring *send_ring,
		void (*send_cq_handler)(struct ib_cq *, void *),
		struct rds_iw_work_ring *recv_ring,
		void (*recv_cq_handler)(struct ib_cq *, void *),
		void *context)
{
	struct ib_device *dev = rds_iwdev->dev;
	struct ib_cq_init_attr cq_attr = {};
	unsigned int send_size, recv_size;
	int ret;

	/* The offset of 1 is to accommodate the additional ACK WR. */
	send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
	recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
	rds_iw_ring_resize(send_ring, send_size - 1);
	rds_iw_ring_resize(recv_ring, recv_size - 1);

	memset(attr, 0, sizeof(*attr));
	attr->event_handler = rds_iw_qp_event_handler;
	attr->qp_context = context;
	attr->cap.max_send_wr = send_size;
	attr->cap.max_recv_wr = recv_size;
	attr->cap.max_send_sge = rds_iwdev->max_sge;
	attr->cap.max_recv_sge = RDS_IW_RECV_SGE;
	attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	attr->qp_type = IB_QPT_RC;

	cq_attr.cqe = send_size;
	attr->send_cq = ib_create_cq(dev, send_cq_handler,
				     rds_iw_cq_event_handler,
				     context, &cq_attr);
	if (IS_ERR(attr->send_cq)) {
		ret = PTR_ERR(attr->send_cq);
		attr->send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	cq_attr.cqe = recv_size;
	attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
				     rds_iw_cq_event_handler,
				     context, &cq_attr);
	if (IS_ERR(attr->recv_cq)) {
		ret = PTR_ERR(attr->recv_cq);
		attr->recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(attr->send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(attr->recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

out:
	if (ret) {
		if (attr->send_cq)
			ib_destroy_cq(attr->send_cq);
		if (attr->recv_cq)
			ib_destroy_cq(attr->recv_cq);
	}
	return ret;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_iw_setup_qp(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct rds_iw_device *rds_iwdev;
	int ret;

	/* rds_iw_add_one creates a rds_iw_device object per IB device,
	 * and allocates a protection domain, memory range and MR pool
	 * for each.  If that fails for any reason, it will not register
	 * the rds_iwdev at all.
	 */
	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
	if (!rds_iwdev) {
		printk_ratelimited(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
				   dev->name);
		return -EOPNOTSUPP;
	}

	/* Protection domain and memory range */
	ic->i_pd = rds_iwdev->pd;
	ic->i_mr = rds_iwdev->mr;

	ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
			&ic->i_send_ring, rds_iw_send_cq_comp_handler,
			&ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
			conn);
	if (ret < 0)
		goto out;

	ic->i_send_cq = attr.send_cq;
	ic->i_recv_cq = attr.recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					&ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}
	rds_iw_send_init_ring(ic);

	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_iw_recv_init_ring(ic);
	rds_iw_recv_init_ack(ic);

	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	return ret;
}

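/*
 * Pick the protocol version to use for an incoming connection, based on
 * the private data the peer sent us. Returns 0 if the versions are
 * incompatible.
 */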
static u32 rds_iw_protocol_compatible(const struct rds_iw_connect_private *dp)
{
	u16 common;
	u32 version = 0;

	/* rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-( */
	if (dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IW_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
				"incompatible protocol version %u.%u\n",
				&dp->dp_saddr,
				dp->dp_protocol_major,
				dp->dp_protocol_minor);
	return version;
}

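/*
 * Handle an incoming connection request: negotiate the protocol version,
 * look up or create the rds_connection, set up the QP and accept (or
 * reject) the request.
 */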
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	const struct rds_iw_connect_private *dp = event->param.conn.private_data;
	struct rds_iw_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_iw_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	struct rds_iw_device *rds_iwdev;
	u32 version;
	int err, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_iw_protocol_compatible(dp);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u\n",
		 &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));

	/* RDS/IW is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_iw_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while a
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_iw_stats_inc(s_iw_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_iw_stats_inc(s_iw_connect_raced);
		}
		mutex_unlock(&conn->c_cm_lock);
		goto out;
	}

	ic = conn->c_transport_data;

	rds_iw_set_protocol(conn, version);
	rds_iw_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client);
	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;

	/* We got halfway through setting up the ib_connection; if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_iw_setup_qp(conn);
	if (err) {
		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
		mutex_unlock(&conn->c_cm_lock);
		goto out;
	}

	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	mutex_unlock(&conn->c_cm_lock);
	if (err) {
		rds_iw_conn_error(conn, "rdma_accept failed (%d)\n", err);
		goto out;
	}

	return 0;

out:
	rdma_reject(cm_id, NULL, 0);
	return destroy;
}

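/*
 * Active connect side: set up the QP and send the connection request
 * with our protocol version and connection parameters.
 */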
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_iw_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_iw_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_iw_sysctl_flow_control;	/* advertise flow control */

	ret = rds_iw_setup_qp(conn);
	if (ret) {
		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_iw_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);

	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_iw_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		struct rds_iw_connection *ic = conn->c_transport_data;

		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	return ret;
}

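/*
 * Start connecting: create a cm_id, bind it to the local address, and
 * kick off address resolution towards the peer.
 */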
int rds_iw_conn_connect(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_device *rds_iwdev;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	/* First, bind to the local address and device. */
	ret = rdma_bind_addr(ic->i_cm_id, (struct sockaddr *)&src);
	if (ret) {
		rdsdebug("rdma_bind_addr(%pI4) failed: %d\n",
			 &conn->c_laddr, ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
		goto out;
	}

	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
	ic->i_dma_local_lkey = rds_iwdev->dma_local_lkey;

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_iw_conn_shutdown(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int err = 0;
	struct ib_qp_attr qp_attr;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				ic->i_cm_id, err);
		}

		if (ic->i_cm_id->qp) {
			qp_attr.qp_state = IB_QPS_ERR;
			ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
		}

		wait_event(rds_iw_ring_empty_wait,
			rds_iw_ring_empty(&ic->i_send_ring) &&
			rds_iw_ring_empty(&ic->i_recv_ring));

		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					ic->i_send_hdrs,
					ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					ic->i_recv_hdrs,
					ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_iw_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_iw_recv_clear_ring(ic);

		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/*
		 * If associated with an rds_iw_device:
		 * Move connection back to the nodev list.
		 * Remove cm_id from the device cm_id list.
		 */
		if (ic->rds_iwdev)
			rds_iw_remove_conn(ic->rds_iwdev, conn);

		rdma_destroy_id(ic->i_cm_id);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_mr = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_iwdev);

	/* Clear pending transmit */
	if (ic->i_rm) {
		rds_message_put(ic->i_rm);
		ic->i_rm = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);

	if (ic->i_iwinc) {
		rds_inc_put(&ic->i_iwinc->ii_inc);
		ic->i_iwinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	rdsdebug("shutdown complete\n");
}

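/*
 * Allocate the per-connection transport data. The new connection is
 * placed on the nodev list until it is associated with a device.
 */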
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_iw_connection *ic;
	unsigned long flags;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_iw_connection), gfp);
	if (!ic)
		return -ENOMEM;

	INIT_LIST_HEAD(&ic->iw_node);
	tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn,
		     (unsigned long) ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif

	/*
	 * rds_iw_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_iw_ring_init(&ic->i_send_ring, rds_iw_sysctl_max_send_wr);
	rds_iw_ring_init(&ic->i_recv_ring, rds_iw_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&iw_nodev_conns_lock, flags);
	list_add_tail(&ic->iw_node, &iw_nodev_conns);
	spin_unlock_irqrestore(&iw_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_iw_conn_free(void *arg)
{
	struct rds_iw_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_iwdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_iwdev ? &ic->rds_iwdev->spinlock : &iw_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->iw_node);
	spin_unlock_irq(lock_ptr);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_iw_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}