/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

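/*
 * The maximum listen backlog is tunable at runtime via the
 * /proc/sys/net/rdma_ucm/max_backlog sysctl registered in ucma_init().
 */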
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying the internal
	 * HW resources; protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

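/*
 * The global mutex "mut" serializes access to both IDRs, while per-file
 * state (ctx_list, event_list) is protected by that file's own "mut".
 * Callers of _ucma_find_context() must hold the global mutex.
 */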
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx() but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound to a device.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event,
						       close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context,
						close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

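/*
 * The multicast id is reserved with a NULL pointer here; the entry only
 * becomes visible to lookups via idr_replace() in ucma_process_join(),
 * once the join has actually been set up.
 */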
static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context still points at this cm_id does it own it and
	 * can it be queued to be closed; otherwise the cm_id is an inflight
	 * one sitting on this context's event list, pending to be detached
	 * and reattached to a new context by ucma_get_event(), and is
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

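/*
 * rdma_cm callback: package the event into a struct ucma_event, queue it
 * on the owning file's event_list, and wake up any pollers.  Device
 * removal additionally kicks the removal handler above.
 */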
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

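/*
 * Hand the next queued event to userspace, blocking unless the file was
 * opened O_NONBLOCK.  A connect request arrives with a brand new cm_id,
 * so a fresh context is allocated here and attached to it before the
 * event is copied out.
 */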
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

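/* Map the requested port space onto the QP type it implies. */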
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx() is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

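/* Legacy bind ABI, limited to sockaddr_in6-sized (IPv4/IPv6) addresses;
 * the newer ucma_bind() below also handles AF_IB addresses.
 */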
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

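/*
 * The three helpers below flatten the resolved route into the query
 * response; ucma_query_route() picks one based on the port's protocol.
 * When no path records were resolved (num_paths == 0), the GIDs are
 * derived from the device address instead.
 */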
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

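/* Note that the qkey supplied by userspace is only honored for AF_IB. */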
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

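/* The requested backlog is clamped to the max_backlog sysctl limit. */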
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else {
		ret = __rdma_accept(ctx->cm_id, NULL, NULL);
	}

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}

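/*
 * Common join path for both the IP-only and the generic multicast ABIs.
 * The error unwinding mirrors the setup order: leave the group and drop
 * queued events (err3), release the reserved id (err2), then drop the
 * context reference (err1).
 */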
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_lock(&mut);
	idr_replace(&multicast_idr, mc, mc->id);
	mutex_unlock(&mut);

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in order of ascending pointer value to
	 * prevent deadlock.
	 */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

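/*
 * write() is the command transport: userspace sends a struct
 * rdma_ucm_cmd_hdr followed by hdr.in bytes of payload, and the handler
 * indexed by hdr.cmd is dispatched.  As a rough, illustrative sketch only
 * (real users go through librdmacm rather than raw writes), issuing
 * RDMA_USER_CM_CMD_CREATE_ID might look like:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = {
 *			.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			.in  = sizeof(msg.cmd),
 *			.out = sizeof(struct rdma_ucm_create_id_resp),
 *		},
 *		.cmd = { ... },
 *	};
 *	write(fd, &msg, sizeof(msg));
 */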
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any inflight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);