// SPDX-License-Identifier: GPL-2.0-only
/*
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */
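
/*
 * The SQ/RQ depths above also size the flow-control semaphores in
 * struct p9_trans_rdma below: a credit is taken (down) before posting a
 * work request and returned (up) from the completion handlers, so the
 * queues cannot be overrun.
 */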
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of uSecs to wait for connection management events
 * @privport: Whether a privileged port may be used
 * @port: The port to use
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *             See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	bool privport;
	u16 port;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @cqe: completion queue entry
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	struct ib_cqe cqe;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall rc;
	};
};
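
/*
 * One p9_rdma_context is allocated per posted work request and freed by
 * the matching completion handler; the union reflects that a context
 * tracks either an outgoing request (send) or a reply buffer (receive),
 * never both.
 */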
/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @privport: Whether a privileged port may be used
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	bool privport;
	int sq_depth;
	int rq_depth;
	long timeout;
};
/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};
static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
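
/*
 * Illustrative mount line for these options (server address and mount
 * point are placeholders, not taken from this file):
 *	mount -t 9p -o trans=rdma,sq=16,rq=32,privport 192.168.1.1 /mnt
 */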
static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
	return 0;
}
/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);

	kfree(tmp_options);
	return 0;
}
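
/*
 * The clamp above enforces the invariant rdma_request() relies on: every
 * request in flight needs a pre-posted receive buffer for its reply, so
 * the RQ must be at least as deep as the SQ.
 */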
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
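
/*
 * rdma_create_trans() below drives connection setup by issuing an
 * asynchronous CM operation and then sleeping on cm_done; the handler
 * above advances rdma->state and signals the completion for each event.
 */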
static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d", tag);
		goto err_out;
	}

	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		 req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}
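
/*
 * recv_done() matches a reply to its request purely by 9P tag and hands
 * the receive buffer over to the request (req->rc.sdata), so no payload
 * copy is needed. The RQ credit is returned on both the success and
 * error paths via the shared out: label.
 */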
static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc.size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	p9_req_put(client, c->req);
	kfree(c);
}
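
/*
 * send_done() returns the SQ credit taken in rdma_request() and drops
 * the reference that the send path held on the request.
 */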
static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;
	int ret;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	ret = ib_post_recv(rdma->qp, &wr, NULL);
	if (ret)
		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
				    client->msize, DMA_FROM_DEVICE);
	return ret;

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here,
	 * see if we are this `next request' and need to absorb an excess rc.
	 * If yes, then drop and free our own, and do not recv_post().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one! */
			p9_fcall_fini(&req->rc);
			req->rc.sdata = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc.sdata = req->rc.sdata;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc.sdata = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc.sdata, c->req->tc.size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	c->cqe.done = send_done;

	sge.addr = c->busa;
	sge.length = c->req->tc.size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto dma_unmap;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	WRITE_ONCE(req->status, REQ_STATUS_SENT);
	err = ib_post_send(rdma->qp, &wr, NULL);
	if (err)
		goto dma_unmap;

	/* Success */
	return 0;

 dma_unmap:
	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
			    c->req->tc.size, DMA_TO_DEVICE);
 /* Handle errors that happened during or while preparing the send: */
 send_error:
	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/*
	 * We did recv_post(), but not send. We have one recv_post in excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

 /* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
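
/*
 * excess_rc accounting, in summary: rdma_request() normally posts one
 * receive per send; if the send fails after the receive was posted, or a
 * request is flushed without a reply (see rdma_cancelled() below), the
 * counter is bumped and a later request absorbs the surplus by skipping
 * post_recv().
 */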
static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}
/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}
/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);
	return 0;
}
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}
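
/*
 * The loop above scans downward from P9_DEF_MAX_RESVPORT until a
 * reserved port binds, giving up on the first error other than
 * -EADDRINUSE.
 */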
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
				   opts.sq_depth + opts.rq_depth + 1,
				   IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
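
/*
 * Registration glue below: a 9p mount selects this transport with
 * trans=rdma. Note that a single CQ sized sq_depth + rq_depth + 1 is
 * shared by send and receive completions in rdma_create_trans() above.
 */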
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.pooled_rbuffers = true,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
	.show_options = p9_rdma_show_options,
};
/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);
MODULE_ALIAS_9P("rdma");
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");