/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 *  Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */
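/* Note that the queue depths and the timeout above are only defaults; they
 * can be overridden per mount via the "sq=", "rq=" and "timeout=" options
 * parsed below (see struct p9_rdma_opts and parse_opts()).
 */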
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
 * @lkey: The local access only memory region key
 * @timeout: Number of msecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *	       See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;
	struct completion cm_done;
};
/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};
/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 * @privport: Whether to bind to a privileged client port
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
	int privport;
};
/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = 0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = 1;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);

	kfree(tmp_options);
	return 0;
}
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	/* The remaining events are fatal to the connection. */
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
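/* Every CM event above finishes with complete(&rdma->cm_done).
 * rdma_create_trans() relies on this: it kicks off each asynchronous CM
 * step, waits on cm_done, and then checks rdma->state to confirm that the
 * expected transition actually happened.
 */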
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc)) {
		pr_err("Duplicate reply for request %d", tag);
		goto err_out;
	}

	req->rc = c->rc;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}
static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->rq_sem);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->sq_sem);
			break;

		default:
			pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}
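/* The up() calls above pair with the down_interruptible() calls in
 * rdma_request(): sq_sem and rq_sem count the free slots in the send and
 * receive queues, and each reaped completion releases one slot.
 */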
static void cq_event_handler(struct ib_event *e, void *v)
{
	p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
		ib_dereg_mr(rdma->dma_mr);

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here,
	 * see if we are this `next request' and need to absorb an excess rc.
	 * If yes, then drop and free our own, and do not recv_post().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one! */
			kfree(req->rc);
			req->rc = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc = req->rc;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto send_error;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	req->status = REQ_STATUS_SENT;
	err = ib_post_send(rdma->qp, &wr, &bad_wr);
	if (err)
		goto send_error;

	/* Success */
	return 0;

 /* Handle errors that happened during or while preparing the send: */
 send_error:
	req->status = REQ_STATUS_ERROR;
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* We did recv_post(), but not send. We have one recv_post in excess. */
	atomic_inc(&rdma->excess_rc);
	return err;

 /* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
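/* To summarize the excess_rc accounting: a send that fails after its
 * receive buffer was posted (send_error above), or a request flushed
 * without a reply (rdma_cancelled() below), leaves one receive context
 * with no pending request; excess_rc counts these so that a later
 * rdma_request() can absorb one instead of posting a fresh buffer.
 */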
static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}
/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);
	return 0;
}
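/* Bind the not-yet-connected CM ID to a reserved client port, scanning down
 * from P9_DEF_MAX_RESVPORT until rdma_bind_addr() stops failing with
 * -EADDRINUSE; used when the "privport" mount option is set.
 */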
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}
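	/* At this point rdma->lkey covers all local memory for DMA: either
	 * the device advertised IB_DEVICE_LOCAL_DMA_LKEY and supplied a key
	 * natively, or we registered an all-of-memory DMA MR above and use
	 * its lkey.
	 */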
	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
};
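/* Once registered, the transport is selected by name at mount time, e.g.
 * (server address and mount point here are hypothetical):
 *
 *	mount -t 9p -o trans=rdma,port=5640 192.168.1.1 /mnt/9p
 */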
/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");