// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY        RPCDBG_SVCXPRT

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
                                                 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
        .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
                                           struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_bc_ops = {
        .xpo_create = svc_rdma_bc_create,
        .xpo_detach = svc_rdma_bc_detach,
        .xpo_free = svc_rdma_bc_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
        .xcl_name = "rdma-bc",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_bc_ops,
        .xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
                                           struct net *net,
                                           struct sockaddr *sa, int salen,
                                           int flags)
{
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;

        cma_xprt = svc_rdma_create_xprt(serv, net);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
        set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
        serv->sv_bc_xprt = xprt;

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        if (xprt)
                kfree(rdma);
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                break;

        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                break;
        }
}

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
                                                 struct net *net)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return NULL;
        }
        svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
        INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
        INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_send_lock);
        spin_lock_init(&cma_xprt->sc_recv_lock);
        spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

        /*
         * Note that this implies that the underlying transport support
         * has some form of congestion control (see RFC 7530 section 3.1
         * paragraph 2). For now, we assume that all supported RDMA
         * transports are suitable here.
         */
        set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}
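
/* A client can include an rpcrdma_connect_private message in the
 * RDMA-CM private data it sends with its connection request. When that
 * message is present and its magic and version match, record whether
 * the client can accept Send With Invalidate and log the advertised
 * inline send and receive sizes.
 */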
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
                               struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                newxprt->sc_snd_w_inv = pmsg->cp_flags &
                                        RPCRDMA_CMP_F_SND_W_INV_OK;

                dprintk("svcrdma: client send_size %u, recv_size %u "
                        "remote inv %ssupported\n",
                        rpcrdma_decode_buffer_size(pmsg->cp_send_size),
                        rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
                        newxprt->sc_snd_w_inv ? "" : "un");
        }
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
                               struct rdma_conn_param *param)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
                                       listen_xprt->sc_xprt.xpt_net);
        if (!newxprt)
                return;
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        svc_rdma_parse_connect_private(newxprt, param);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = param->initiator_depth;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;

        trace_svcrdma_cm_event(event, sap);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, cma_id->context,
                        rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id, &event->param.conn);
                break;
        default:
                /* NB: No device removal upcall for INADDR_ANY listeners */
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }

        return 0;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
        struct svcxprt_rdma *rdma = cma_id->context;
        struct svc_xprt *xprt = &rdma->sc_xprt;

        trace_svcrdma_cm_event(event, sap);

        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                svc_xprt_put(xprt);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, xprt,
                        rdma_event_msg(event->event), event->event);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                svc_xprt_put(xprt);
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA listener\n");
        if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
                dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
                return ERR_PTR(-EAFNOSUPPORT);
        }
        cma_xprt = svc_rdma_create_xprt(serv, net);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
        strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

        listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        /* Allow both IPv4 and IPv6 sockets to bind a single port
         * at the same time.
         */
#if IS_ENABLED(CONFIG_IPV6)
        ret = rdma_set_afonly(listen_id, 1);
        if (ret) {
                dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
                goto err1;
        }
#endif
        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct rpcrdma_connect_private pmsg;
        struct ib_qp_init_attr qp_attr;
        unsigned int ctxts, rq_depth;
        struct ib_device *dev;
        struct sockaddr *sap;
        int ret = 0;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);
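
        /* Size the Receive queue to hold both forward and backchannel
         * requests, and the Send queue to also cover the RDMA Read and
         * Write contexts used for direct data placement, then clamp
         * both to what this device can actually support.
         */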
        dev = newxprt->sc_cm_id->device;
        newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
        /* transport hdr, head iovec, one page list entry, tail iovec */
        if (newxprt->sc_max_send_sges < 4) {
                pr_err("svcrdma: too few Send SGEs available (%d)\n",
                       newxprt->sc_max_send_sges);
                goto errout;
        }
        newxprt->sc_max_req_size = svcrdma_max_req_size;
        newxprt->sc_max_requests = svcrdma_max_requests;
        newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
        rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
        if (rq_depth > dev->attrs.max_qp_wr) {
                pr_warn("svcrdma: reducing receive depth to %d\n",
                        dev->attrs.max_qp_wr);
                rq_depth = dev->attrs.max_qp_wr;
                newxprt->sc_max_requests = rq_depth - 2;
                newxprt->sc_max_bc_requests = 2;
        }
        newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
        ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
        ctxts *= newxprt->sc_max_requests;
        newxprt->sc_sq_depth = rq_depth + ctxts;
        if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
                pr_warn("svcrdma: reducing send depth to %d\n",
                        dev->attrs.max_qp_wr);
                newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
        }
        atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
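
        /* Allocate the verbs resources for this connection: a PD,
         * separate Send and Receive completion queues, and then an RC
         * QP owned by the new cm_id.
         */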
        newxprt->sc_pd = ib_alloc_pd(dev, 0);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
                                        0, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
                                        0, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.port_num = newxprt->sc_port_num;
        qp_attr.cap.max_rdma_ctxs = ctxts;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
        qp_attr.cap.max_recv_wr = rq_depth;
        qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
        qp_attr.cap.max_recv_sge = 1;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
                newxprt->sc_cm_id, newxprt->sc_pd);
        dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
                qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
        dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
                qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                newxprt->sc_snd_w_inv = false;
        if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
            !rdma_ib_or_roce(dev, newxprt->sc_port_num))
                goto errout;

        if (!svc_rdma_post_recvs(newxprt))
                goto errout;

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /* Construct RDMA-CM private message */
        pmsg.cp_magic = rpcrdma_cmp_magic;
        pmsg.cp_version = RPCRDMA_CMP_VERSION;
        pmsg.cp_flags = 0;
        pmsg.cp_send_size = pmsg.cp_recv_size =
                rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
                                           dev->attrs.max_qp_init_rd_atom);
        if (!conn_param.initiator_depth) {
                dprintk("svcrdma: invalid ORD setting\n");
                ret = -EINVAL;
                goto errout;
        }
        conn_param.private_data = &pmsg;
        conn_param.private_data_len = sizeof(pmsg);
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret)
                goto errout;

        dprintk("svcrdma: new connection %p accepted:\n", newxprt);
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
        dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
        dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
        dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
        dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
        dprintk("    ord             : %d\n", conn_param.initiator_depth);

        trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}
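
/* Transport teardown has to run in process context: ib_drain_qp() and
 * the verbs destroy calls below can sleep. svc_rdma_free() therefore
 * defers the destruction to a workqueue, and this function performs
 * the actual work.
 */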
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        struct svc_xprt *xprt = &rdma->sc_xprt;

        trace_svcrdma_xprt_free(xprt);

        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_drain_qp(rdma->sc_qp);

        /* We should only be called from kref_put */
        if (kref_read(&xprt->xpt_ref) != 0)
                pr_err("svcrdma: sc_xprt still in use? (%d)\n",
                       kref_read(&xprt->xpt_ref));

        svc_rdma_flush_recv_queues(rdma);

        /* Final put of backchannel client transport */
        if (xprt->xpt_bc_xprt) {
                xprt_put(xprt->xpt_bc_xprt);
                xprt->xpt_bc_xprt = NULL;
        }

        svc_rdma_destroy_rw_ctxts(rdma);
        svc_rdma_send_ctxts_destroy(rdma);
        svc_rdma_recv_ctxts_destroy(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_free_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_free_cq(rdma->sc_rq_cq);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}
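
/* Requests arriving on an RDMA transport are treated as if they came
 * from a privileged port, so exports that require a "secure" port are
 * reachable over RPC-over-RDMA.
 */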
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
        set_bit(RQ_SECURE, &rqstp->rq_flags);
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}