/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
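
/* Exposed as /proc/sys/net/rdma_ucm/max_backlog (registered in ucma_init()
 * below); it caps the backlog value applied to each listen in
 * ucma_listen().
 */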

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that device is in process of destroying the internal HW
	 * resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
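
/* Locking overview: the global "mut" protects the two idr tables and
 * ctx->closing, while each file's "mut" protects that file's ctx_list and
 * event_list (see the field comments in struct ucma_context above).
 */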

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
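
/* The context reference count starts at 1, taken in ucma_alloc_ctx().
 * ucma_put_ctx() completes ctx->comp when the last reference drops, which
 * is what lets ucma_destroy_id() and ucma_close_id() wait out inflight
 * users before calling rdma_destroy_id().
 */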

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* If the context owns this cm_id (ctx->cm_id points to it), the
	 * context itself can be queued to be closed. Otherwise the cm_id is
	 * an inflight one sitting on the context's event list, waiting to be
	 * detached and reattached to a new context by ucma_get_event();
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
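
/* A note on the kzalloc() failure path above: by rdma_cm convention, a
 * nonzero return from the event handler asks the core to destroy the id
 * that generated the event. On allocation failure we therefore return
 * nonzero only for CONNECT_REQUEST events, whose kernel-created id is not
 * yet visible to userspace and would otherwise leak; for all other events
 * the user still owns the id, so we must return 0.
 */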

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
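
/* A CONNECT_REQUEST event arrives on a kernel-created cm_id that no user
 * context owns yet. The code above allocates a fresh ucma_context at
 * report time and re-points cm_id->context at it; this is the detach and
 * reattach case that ucma_removal_event_handler() mentions.
 */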

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
				    ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx() is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
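
/* Two orderings are possible above: if a device-removal event already
 * queued ucma_close_id() (ctx->closing is set), that work item has dropped
 * the initial reference and destroyed the cm_id, leaving only
 * ucma_free_ctx() for us; otherwise we drop the reference and wait for the
 * completion ourselves before destroying the id.
 */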

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct sockaddr *src, *dst;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	src = (struct sockaddr *) &cmd.src_addr;
	dst = (struct sockaddr *) &cmd.dst_addr;
	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
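
/* The user-supplied qkey is honored only for AF_IB addressing; for
 * IP-based addressing it is forced to 0 here, and (as an aside, this is a
 * property of the rdma_cm core rather than of this file) the core derives
 * the qkey itself in that case.
 */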

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}
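
/* RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER maps to a send-only full-member
 * join: the group is joined for transmission without requesting delivery
 * of multicast traffic, where the fabric supports that join state.
 */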

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in a consistent order, by pointer comparison,
	 * to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] 	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
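
/* write() protocol: each write carries one struct rdma_ucm_cmd_hdr (cmd,
 * in, out) followed by hdr.in bytes of command payload, and on success the
 * full write length is returned. The usual caller is librdmacm, though
 * that is a userspace convention rather than something enforced here.
 */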

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx is marked as destroying and the workqueue has
		 * been flushed, we are safe from any inflight handlers that
		 * might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};
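
/* With a dynamic minor and .nodename "infiniband/rdma_cm", the character
 * device typically appears as /dev/infiniband/rdma_cm via devtmpfs/udev.
 */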

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);