// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
static const struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};
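
/* Reverse-direction (backchannel) support: the "rdma-bc" class carries
 * RPC calls sent by the server to the client over an already-established
 * forward channel connection, as used by NFSv4.1+ callbacks.
 */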
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	}
}
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return NULL;
	}
	svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_send_lock);
	spin_lock_init(&cma_xprt->sc_recv_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport support
	 * has some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
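
/* Examine the transport-specific RDMA-CM private message, if the client
 * sent one, to learn the client's proposed inline buffer sizes and
 * whether it can accept Send With Invalidate.
 */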
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
				       listen_xprt->sc_xprt.xpt_net);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	/* The remote port is arbitrary and not under the control of the
	 * client ULP. Set it to a fixed value so that the DRC continues
	 * to be effective after a reconnect.
	 */
	rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;
	default:
		/* NB: No device removal upcall for INADDR_ANY listeners */
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return 0;
}
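
/*
 * Handles events generated on a connected (data transfer) endpoint:
 * connection established, disconnect, and device removal. Fatal events
 * mark the transport closed and wake a server thread to clean it up.
 */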
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}
/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA listener\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

	listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	unsigned int ctxts, rq_depth;
	struct ib_device *dev;
	struct sockaddr *sap;
	int ret = 0;
	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);
	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	/* Transport header, head iovec, tail iovec */
	newxprt->sc_max_send_sges = 3;
	/* Add one SGE per page list entry */
	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = svcrdma_max_requests;
	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
	if (rq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing receive depth to %d\n",
			dev->attrs.max_qp_wr);
		rq_depth = dev->attrs.max_qp_wr;
		newxprt->sc_max_requests = rq_depth - 2;
		newxprt->sc_max_bc_requests = 2;
	}
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
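	/* Size the Send Queue to hold one Send WR per credit plus room
	 * for the Work Requests that the rdma_rw API will post for RDMA
	 * Read and Write transfers (one chain per rdma_rw context).
	 */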
	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
	ctxts *= newxprt->sc_max_requests;
	newxprt->sc_sq_depth = rq_depth + ctxts;
	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing send depth to %d\n",
			dev->attrs.max_qp_wr);
		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
	}
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_port_num;
	qp_attr.cap.max_rdma_ctxs = ctxts;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
	qp_attr.cap.max_recv_wr = rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		newxprt->sc_snd_w_inv = false;
	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_port_num))
		goto errout;

	if (!svc_rdma_post_recvs(newxprt))
		goto errout;

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
					   dev->attrs.max_qp_init_rd_atom);
	if (!conn_param.initiator_depth) {
		dprintk("svcrdma: invalid ORD setting\n");
		ret = -EINVAL;
		goto errout;
	}
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret)
		goto errout;
591 dprintk("svcrdma: new connection %p accepted:\n", newxprt
);
592 sap
= (struct sockaddr
*)&newxprt
->sc_cm_id
->route
.addr
.src_addr
;
593 dprintk(" local address : %pIS:%u\n", sap
, rpc_get_port(sap
));
594 sap
= (struct sockaddr
*)&newxprt
->sc_cm_id
->route
.addr
.dst_addr
;
595 dprintk(" remote address : %pIS:%u\n", sap
, rpc_get_port(sap
));
596 dprintk(" max_sge : %d\n", newxprt
->sc_max_send_sges
);
597 dprintk(" sq_depth : %d\n", newxprt
->sc_sq_depth
);
598 dprintk(" rdma_rw_ctxs : %d\n", ctxts
);
599 dprintk(" max_requests : %d\n", newxprt
->sc_max_requests
);
600 dprintk(" ord : %d\n", conn_param
.initiator_depth
);
602 trace_svcrdma_xprt_accept(&newxprt
->sc_xprt
);
603 return &newxprt
->sc_xprt
;
606 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret
);
607 trace_svcrdma_xprt_fail(&newxprt
->sc_xprt
);
608 /* Take a reference in case the DTO handler runs */
609 svc_xprt_get(&newxprt
->sc_xprt
);
610 if (newxprt
->sc_qp
&& !IS_ERR(newxprt
->sc_qp
))
611 ib_destroy_qp(newxprt
->sc_qp
);
612 rdma_destroy_id(newxprt
->sc_cm_id
);
613 /* This call to put will destroy the transport */
614 svc_xprt_put(&newxprt
->sc_xprt
);
/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_xprt_free(xprt);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	/* We should only be called from kref_put */
	if (kref_read(&xprt->xpt_ref) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       kref_read(&xprt->xpt_ref));

	svc_rdma_flush_recv_queues(rdma);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_send_ctxts_destroy(rdma);
	svc_rdma_recv_ctxts_destroy(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}
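
/* xpo_free: defer transport destruction to a workqueue. Draining the QP
 * and releasing the verbs resources can sleep, so it must not run in the
 * (possibly atomic) context that drops the final xprt reference.
 */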
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}
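
/* RPC-over-RDMA has no notion of reserved (privileged) source ports, so
 * mark every incoming request as arriving on a "secure" port.
 */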
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	set_bit(RQ_SECURE, &rqstp->rq_flags);
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}