/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

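/*
 * max_backlog caps the backlog a user may request via ucma_listen() and is
 * tunable at runtime through /proc/sys/net/rdma_ucm/max_backlog (the sysctl
 * table below is registered in ucma_init()).
 */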
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

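/*
 * The global "mut" serializes access to ctx_idr, multicast_idr and the
 * ctx->closing flag; per-file state (ctx_list, event_list) is protected by
 * the owning file's mut.
 */
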
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

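/*
 * ctx->ref starts at 1, held by the creator. Dropping the final reference
 * completes ctx->comp, which ucma_close_id()/ucma_destroy_id() wait on so
 * that rdma_destroy_id() is only called once no lookups remain in flight.
 */
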
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

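/* Must be called with file->mut held: the new context is linked onto
 * file->ctx_list.
 */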
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

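/*
 * For multicast join/error events the rdma_cm hands back the ucma_multicast
 * pointer we registered in ucma_process_join() via ud.private_data, so the
 * event is reported against that multicast's uid/id rather than the
 * context's.
 */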
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context owns this cm_id can it be queued to be closed.
	 * Otherwise the cm_id is an inflight one belonging to a pending
	 * connect request on the context's event list, waiting to be
	 * detached and reattached to its new context by ucma_get_event();
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

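/*
 * Note: the rdma_cm destroys a connect request's new cm_id when its event
 * handler returns a nonzero value (see cma.c), which is why an allocation
 * failure for a CONNECT_REQUEST event returns 1 below (dropping the
 * request), while other events simply go unreported.
 */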
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
				    ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx() is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct sockaddr *src, *dst;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	src = (struct sockaddr *) &cmd.src_addr;
	dst = (struct sockaddr *) &cmd.dst_addr;
	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

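/*
 * num_paths in the response mirrors route->num_paths: 0 means no path
 * records were resolved (the GIDs are taken from the device address
 * instead), 1 reports the primary path, and 2 additionally reports an
 * alternate path record.
 */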
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

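/*
 * The qkey is only taken from userspace for AF_IB addresses; for IP-based
 * port spaces the rdma_cm controls the qkey itself, so it is forced to
 * zero here.
 */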
static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

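/*
 * join_flags selects an exact multicast membership: full member (sends and
 * receives) or send-only full member; any other combination is rejected
 * with -EINVAL.
 */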
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

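/* Caller must hold both files' mutexes (see ucma_lock_files()). */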
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] 	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

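/*
 * Userspace drives this device with write(): each request is a struct
 * rdma_ucm_cmd_hdr (hdr.cmd indexes ucma_cmd_table, hdr.in/hdr.out give the
 * payload and response sizes), followed immediately by the command payload.
 * A minimal caller sketch, assuming the layouts in <rdma/rdma_user_cm.h>:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(msg.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *	};
 *	// fill in msg.cmd (uid, ps, qp_type, response buffer pointer), then:
 *	write(fd, &msg, sizeof(msg));
 */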
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue has
		 * been flushed, we are safe from any inflight handlers that
		 * might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

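/*
 * Registered in ucma_init() as a dynamic misc device; it appears as
 * /dev/infiniband/rdma_cm with mode 0666 so unprivileged processes can
 * use the RDMA connection manager.
 */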
static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);