/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        UCMA_MAX_BACKLOG        = 128
};
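/*
 * Bookkeeping for the userspace CM interface.  Each open of the misc device
 * gets a ucma_file; every rdma_cm_id created through that file is tracked by
 * a ucma_context, multicast joins hang off the context as ucma_multicast
 * entries, and asynchronous rdma_cm events are queued per file as ucma_event
 * records until userspace collects them with the GET_EVENT command.
 */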
struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        int                     id;
        int                     events_reported;

        u64                     uid;
        struct list_head        list;
        struct sockaddr         addr;
        u8                      pad[sizeof(struct sockaddr_in6) -
                                    sizeof(struct sockaddr)];
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
};
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
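/*
 * mut serializes access to the two idr tables above.  Each ucma_file has its
 * own mutex (file->mut) that protects that file's ctx_list and event_list.
 */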
static inline struct ucma_context *_ucma_find_context(int id,
                                                       struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}
static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}
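/*
 * Context and multicast entries are handed out to userspace as small integer
 * ids.  The two allocators below use the idr_pre_get()/idr_get_new() API:
 * preload outside the lock, insert under mut, and retry on -EAGAIN if
 * another thread consumed the preallocated node.
 */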
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        do {
                ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
                if (!ret)
                        goto error;

                mutex_lock(&mut);
                ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);

        if (ret)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;
        int ret;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        do {
                ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
                if (!ret)
                        goto error;

                mutex_lock(&mut);
                ret = idr_get_new(&multicast_idr, mc, &mc->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);

        if (ret)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}
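/*
 * Event parameters reported by the rdma_cm are copied into the flat,
 * fixed-size structures defined in rdma_user_cm.h before being queued for
 * userspace; only the first private_data_len bytes of private data are
 * meaningful to the reader.
 */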
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}
static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}
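/*
 * rdma_cm event callback.  Runs in the rdma_cm's callback context: the event
 * is marshalled into a ucma_event, queued on the owning file's event_list,
 * and poll()/GET_EVENT waiters are woken.  For a connect request on a
 * listening id, returning nonzero asks the rdma_cm to drop the newly created
 * child id; that is used here when the event cannot be allocated or the
 * listen backlog is exhausted.
 */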
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        mutex_lock(&ctx->file->mut);
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later.
                 */
                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}
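/*
 * GET_EVENT: block (unless O_NONBLOCK) until an event is queued on this
 * file, then copy it to the user's response buffer.  A connect request is
 * special-cased: a fresh ucma_context is allocated for the incoming id so
 * that userspace can address the new connection in later commands.
 */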
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}
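/*
 * CREATE_ID: allocate a ucma_context for the caller and bind it to a new
 * rdma_cm_id.  The kernel-assigned context id is returned to userspace and
 * is what every subsequent command uses to name this connection.
 */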
static ssize_t ucma_create_id(struct ucma_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}
static void ucma_cleanup_events(struct ucma_context *ctx)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx != ctx)
                        continue;

                list_del(&uevent->list);

                /* clear incoming connections. */
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);

                kfree(uevent);
        }
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}
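/*
 * Final teardown of a context, called once no other thread can hold a
 * reference (see ucma_destroy_id and ucma_close): destroy the rdma_cm_id so
 * no further events arrive, drop multicast state, purge events that were
 * never delivered, and report how many were consumed so userspace can
 * reconcile its own accounting.
 */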
static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;

        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        ucma_cleanup_events(ctx);
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        resp.events_reported = ucma_free_ctx(ctx);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}
static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                ib_addr_get_dgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].dgid);
                ib_addr_get_sgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}
static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query_route cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
        switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                break;
        default:
                break;
        }

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}
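/*
 * CONNECT/ACCEPT parameters arrive as a flat rdma_ucm_conn_param embedded in
 * the command; they are copied field by field into the rdma_conn_param the
 * rdma_cm expects, with private_data left pointing into the command buffer
 * that was already copied in from userspace.
 */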
static void ucma_copy_conn_param(struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(&conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
                       cmd.backlog : UCMA_MAX_BACKLOG;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
                ret = rdma_accept(ctx->cm_id, &conn_param);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}
static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        optval = kmalloc(cmd.optlen, GFP_KERNEL);
        if (!optval) {
                ret = -ENOMEM;
                goto out1;
        }

        if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
                           cmd.optlen)) {
                ret = -EFAULT;
                goto out2;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);
out2:
        kfree(optval);
out1:
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }

        mc->uid = cmd.uid;
        memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
        ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}
static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else {
                idr_remove(&multicast_idr, mc->id);
                atomic_inc(&mc->ctx->ref);
        }
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        /* Acquire mutex's based on pointer comparison to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
                mutex_lock(&file2->mut);
        } else {
                mutex_lock(&file2->mut);
                mutex_lock(&file1->mut);
        }
}
static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        if (file1 < file2) {
                mutex_unlock(&file2->mut);
                mutex_unlock(&file1->mut);
        } else {
                mutex_unlock(&file1->mut);
                mutex_unlock(&file2->mut);
        }
}
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &file->event_list);
}
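/*
 * MIGRATE_ID: re-home an existing context onto the file backing another
 * open of this device (identified by the fd in the command).  Both files
 * are locked in address order, the context moves to the new file's ctx_list,
 * and any events already queued for it follow so that their ordering is kept.
 */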
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
        struct file *filp;
        struct ucma_file *cur_file;
        int ret = 0;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Get current fd to protect against it being closed */
        filp = fget(cmd.fd);
        if (!filp)
                return -ENOENT;

        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(filp->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
        }

        cur_file = ctx->file;
        if (cur_file == new_file) {
                resp.events_reported = ctx->events_reported;
                goto response;
        }

        /*
         * Migrate events between fd's, maintaining order, and avoiding new
         * events being added before existing events.
         */
        ucma_lock_files(cur_file, new_file);
        mutex_lock(&mut);

        list_move_tail(&ctx->list, &new_file->ctx_list);
        ucma_move_events(ctx, new_file);
        ctx->file = new_file;
        resp.events_reported = ctx->events_reported;

        mutex_unlock(&mut);
        ucma_unlock_files(cur_file, new_file);

response:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
file_put:
        fput(filp);
        return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]    = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]   = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_ADDR]    = ucma_bind_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]  = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]      = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]       = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]       = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]       = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]   = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]    = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]   = ucma_set_option,
        [RDMA_USER_CM_CMD_NOTIFY]       = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_MCAST]   = ucma_join_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]  = ucma_leave_multicast,
        [RDMA_USER_CM_CMD_MIGRATE_ID]   = ucma_migrate_id
};
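/*
 * Command dispatch.  Userspace (normally via librdmacm) drives this driver
 * by write()ing a struct rdma_ucm_cmd_hdr followed by the command-specific
 * structure; responses are written back through the user pointer carried in
 * each command.  A rough, illustrative sketch of a caller, not part of this
 * file ("fd" is an open descriptor for this misc device, "resp" a userspace
 * struct rdma_ucm_create_id_resp, and "my_cookie" a caller-chosen tag):
 *
 *      struct {
 *              struct rdma_ucm_cmd_hdr   hdr;
 *              struct rdma_ucm_create_id cmd;
 *      } msg;
 *
 *      msg.hdr.cmd      = RDMA_USER_CM_CMD_CREATE_ID;
 *      msg.hdr.in       = sizeof(msg.cmd);
 *      msg.hdr.out      = sizeof(resp);
 *      msg.cmd.uid      = (uintptr_t) my_cookie;
 *      msg.cmd.response = (uintptr_t) &resp;
 *      msg.cmd.ps       = RDMA_PS_TCP;
 *      write(fd, &msg, sizeof(msg));
 */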
static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}
static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;
        return 0;
}
static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        kfree(file);
        return 0;
}
static const struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "rdma_cm",
        .fops   = &ucma_fops,
};
static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
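/*
 * Module init/exit: register the dynamic misc device (typically exposed as
 * /dev/infiniband/rdma_cm by udev) and publish the ABI version as a sysfs
 * attribute so that userspace (librdmacm) can check it is talking to a
 * compatible kernel interface.
 */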
static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
                goto err;
        }
        return 0;
err:
        misc_deregister(&ucma_misc);
        return ret;
}
static void __exit ucma_cleanup(void)
{
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);