/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

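/* Server-side transport method table. The generic svc_xprt code in
 * net/sunrpc/svc_xprt.c drives an RPC-over-RDMA transport through
 * these callbacks; svc_rdma_recvfrom() and svc_rdma_sendto() are
 * implemented in sibling files in net/sunrpc/xprtrdma/.
 */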
static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
					   gfp_t flags)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmalloc(sizeof(*ctxt), flags);
	if (ctxt) {
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->list);
	}
	return ctxt;
}

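/* Pre-allocate one op_ctxt for every send and receive WQE the
 * connection can have outstanding, so the I/O paths normally never
 * allocate memory. svc_rdma_get_context() below falls back to a
 * GFP_NOIO allocation only when this pool runs dry.
 */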
static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* Each RPC/RDMA credit can consume a number of send
	 * and receive WQEs. One ctxt is allocated for each.
	 */
	i = xprt->sc_sq_depth + xprt->sc_rq_depth;

	while (i--) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = alloc_ctxt(xprt, GFP_KERNEL);
		if (!ctxt) {
			dprintk("svcrdma: No memory for RDMA ctxt\n");
			return false;
		}
		list_add(&ctxt->list, &xprt->sc_ctxts);
	}
	return true;
}

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used++;
	if (list_empty(&xprt->sc_ctxts))
		goto out_empty;

	ctxt = list_first_entry(&xprt->sc_ctxts,
				struct svc_rdma_op_ctxt, list);
	list_del(&ctxt->list);
	spin_unlock(&xprt->sc_ctxt_lock);

out:
	ctxt->count = 0;
	ctxt->mapped_sges = 0;
	ctxt->frmr = NULL;
	return ctxt;

out_empty:
	/* Either pre-allocation missed the mark, or send
	 * queue accounting is broken.
	 */
	spin_unlock(&xprt->sc_ctxt_lock);

	ctxt = alloc_ctxt(xprt, GFP_NOIO);
	if (ctxt)
		goto out;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	spin_unlock(&xprt->sc_ctxt_lock);
	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
	return NULL;
}

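/* Typical op_ctxt life cycle in this file (sketch):
 *
 *	ctxt = svc_rdma_get_context(xprt);
 *	ctxt->cqe.done = ...;		// completion callback
 *	... DMA-map pages into ctxt->sge[] and post a WR ...
 *
 *	// later, in the completion handler:
 *	svc_rdma_unmap_dma(ctxt);
 *	svc_rdma_put_context(ctxt, free_pages);
 */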
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	u32 lkey = xprt->sc_pd->local_dma_lkey;
	unsigned int i;

	for (i = 0; i < ctxt->mapped_sges; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the local_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == lkey)
			ib_dma_unmap_page(device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
	}
	ctxt->mapped_sges = 0;
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->list, &xprt->sc_ctxts);
	spin_unlock(&xprt->sc_ctxt_lock);
}

static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxts)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		kfree(ctxt);
	}
}

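/* The req_map pool below mirrors the op_ctxt pool: maps are
 * pre-allocated at accept time, one per receive buffer, and are
 * recycled through sc_maps under sc_map_lock.
 */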
static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
{
	struct svc_rdma_req_map *map;

	map = kmalloc(sizeof(*map), flags);
	if (map)
		INIT_LIST_HEAD(&map->free);
	return map;
}

static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* One for each receive buffer on this connection. */
	i = xprt->sc_max_requests;

	while (i--) {
		struct svc_rdma_req_map *map;

		map = alloc_req_map(GFP_KERNEL);
		if (!map) {
			dprintk("svcrdma: No memory for request map\n");
			return false;
		}
		list_add(&map->free, &xprt->sc_maps);
	}
	return true;
}

struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_req_map *map = NULL;

	spin_lock(&xprt->sc_map_lock);
	if (list_empty(&xprt->sc_maps))
		goto out_empty;

	map = list_first_entry(&xprt->sc_maps,
			       struct svc_rdma_req_map, free);
	list_del_init(&map->free);
	spin_unlock(&xprt->sc_map_lock);

out:
	map->count = 0;
	return map;

out_empty:
	spin_unlock(&xprt->sc_map_lock);

	/* Pre-allocation amount was incorrect */
	map = alloc_req_map(GFP_NOIO);
	if (map)
		goto out;

	WARN_ONCE(1, "svcrdma: empty request map list?\n");
	return NULL;
}

void svc_rdma_put_req_map(struct svcxprt_rdma *xprt,
			  struct svc_rdma_req_map *map)
{
	spin_lock(&xprt->sc_map_lock);
	list_add(&map->free, &xprt->sc_maps);
	spin_unlock(&xprt->sc_map_lock);
}

static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_maps)) {
		struct svc_rdma_req_map *map;

		map = list_first_entry(&xprt->sc_maps,
				       struct svc_rdma_req_map, free);
		list_del(&map->free);
		kfree(map);
	}
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->byte_len = wc->byte_len;
	spin_lock(&xprt->sc_rq_dto_lock);
	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
	spin_unlock(&xprt->sc_rq_dto_lock);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		goto out;
	svc_xprt_enqueue(&xprt->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_warn("svcrdma: receive: %s (%u/0x%x)\n",
			ib_wc_status_msg(wc->status),
			wc->status, wc->vendor_err);
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	svc_rdma_put_context(ctxt, 1);

out:
	svc_xprt_put(&xprt->sc_xprt);
}

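/* Common Send Queue completion handling: return the SQ credit
 * consumed by the completed WR, wake any thread sleeping in
 * svc_rdma_send(), and close the transport on error.
 */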
static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
				    struct ib_wc *wc,
				    const char *opname)
{
	if (wc->status != IB_WC_SUCCESS)
		goto err;

out:
	atomic_inc(&xprt->sc_sq_avail);
	wake_up(&xprt->sc_send_wait);
	return;

err:
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: %s: %s (%u/0x%x)\n",
		       opname, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	goto out;
}

static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
					const char *opname)
{
	struct svcxprt_rdma *xprt = cq->cq_context;

	svc_rdma_send_wc_common(xprt, wc, opname);
	svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common_put(cq, wc, "send");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
}

/**
 * svc_rdma_wc_write - Invoked by RDMA provider for each polled Write WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_write(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common_put(cq, wc, "write");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
}

/**
 * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
{
	svc_rdma_send_wc_common_put(cq, wc, "fastreg");
}

/**
 * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common(xprt, wc, "read");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(xprt, ctxt->frmr);

	if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
		struct svc_rdma_op_ctxt *read_hdr;

		read_hdr = ctxt->read_hdr;
		spin_lock(&xprt->sc_rq_dto_lock);
		list_add_tail(&read_hdr->list,
			      &xprt->sc_read_complete_q);
		spin_unlock(&xprt->sc_rq_dto_lock);

		set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&xprt->sc_xprt);
	}

	svc_rdma_put_context(ctxt, 0);
	svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
{
	svc_rdma_send_wc_common_put(cq, wc, "localInv");
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_maps);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_map_lock);

	/*
	 * Note that this implies that the underlying transport supports
	 * some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

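/* Post a single Receive WR. The receive buffer is built from
 * individually DMA-mapped pages until at least sc_max_req_size
 * bytes are covered, so a full inline RPC Call can land in it.
 */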
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->cqe.done = svc_rdma_wc_receive;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(flags);
		if (!page)
			goto err_put_ctxt;
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		svc_rdma_count_mappings(xprt, ctxt);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_cqe = &ctxt->cqe;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	int ret = 0;

	ret = svc_rdma_post_recv(xprt, flags);
	if (ret) {
		pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
		       ret);
		pr_err("svcrdma: closing transport %p.\n", xprt);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		ret = -ENOTCONN;
	}
	return ret;
}

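/* Inspect the RPC-over-RDMA private message, if any, carried in the
 * client's connect request (struct rpcrdma_connect_private; see
 * include/linux/sunrpc/rpc_rdma.h). Among other things it tells the
 * server whether this client accepts Send With Invalidate.
 */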
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listener xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * are either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

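/* Listener bring-up follows the usual RDMA CM sequence (sketch):
 *
 *	rdma_create_id()   - register rdma_listen_handler
 *	rdma_bind_addr()   - bind to the caller's address and port
 *	rdma_listen()      - CONNECT_REQUEST events begin to arrive
 */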
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

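/* Fast Registration (FRMR) support: each svc_rdma_fastreg_mr bundles
 * an ib_mr with a scatterlist so RDMA Read chunks can be registered
 * on demand. Idle frmrs are cached on sc_frmr_q and recycled by the
 * get/put helpers below.
 */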
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr))
		goto err_free_frmr;

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;

	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		kfree(frmr->sg);
		ib_dereg_mr(frmr->mr);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->sg_nents = 0;
	}
	spin_unlock(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		spin_lock(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	struct sockaddr *sap;
	unsigned int i;
	int ret = 0;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
					 svcrdma_max_requests);
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
	newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
					    svcrdma_max_bc_requests);
	newxprt->sc_rq_depth = newxprt->sc_max_requests +
			       newxprt->sc_max_bc_requests;
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	if (!svc_rdma_prealloc_ctxts(newxprt))
		goto errout;
	if (!svc_rdma_prealloc_maps(newxprt))
		goto errout;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			dev->attrs.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	} else
		newxprt->sc_snd_w_inv = false;

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
	dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

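/* Actual transport destruction is done from a workqueue so that it
 * runs in process context: ib_drain_qp() and the verbs teardown
 * calls below can sleep, while svc_rdma_free() may be invoked from
 * the kref release path.
 */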
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, rdma);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	/* We should only be called from kref_put */
	if (kref_read(&xprt->xpt_ref) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       kref_read(&xprt->xpt_ref));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_read_complete_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_rq_dto_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (rdma->sc_ctxt_used != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       rdma->sc_ctxt_used);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	rdma_dealloc_frmr_q(rdma);
	svc_rdma_destroy_ctxts(rdma);
	svc_rdma_destroy_maps(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

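/* Report the port as secure so the generic server code's
 * privileged-port check does not reject RPCs arriving via RDMA;
 * a reserved-port test is not meaningful for this transport.
 */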
static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}

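/* Post a chain of Send Queue WRs. sc_sq_avail tracks free SQ slots:
 * credits for the whole chain are debited up front, and if the SQ
 * would be overcommitted, the caller sleeps on sc_send_wait until a
 * Send completion returns credits (see svc_rdma_send_wc_common).
 */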
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if (atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0) {
			atomic_inc(&rdma_stat_sq_starve);

			/* Wait until SQ WR available if SQ still full */
			atomic_add(wr_count, &xprt->sc_sq_avail);
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_avail) > wr_count);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
			dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
				atomic_read(&xprt->sc_sq_avail),
				xprt->sc_sq_depth);
			wake_up(&xprt->sc_send_wait);
		}
		break;
	}
	return ret;
}