/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
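
/*
 * Upper bound on the listen backlog a user may request; adjustable at
 * runtime through the sysctl registered below as
 * /proc/sys/net/rdma_ucm/max_backlog.
 */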

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
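
/*
 * Context lookup and reference counting: ucma_get_ctx() looks an id up in
 * ctx_idr under the global mutex and takes a reference; ucma_put_ctx()
 * drops it and completes ctx->comp when the last reference goes away,
 * which ucma_destroy_id() waits on before freeing the context.
 */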

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
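
/*
 * rdma_cm event callback: package the event for userspace and queue it on
 * the owning file's event_list, waking any pollers or blocked
 * ucma_get_event() callers.  Connect requests consume one slot of
 * ctx->backlog and are refused once the backlog is exhausted.
 */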

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
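
/*
 * RDMA_USER_CM_CMD_GET_EVENT: hand the oldest queued event to userspace,
 * blocking unless the file was opened O_NONBLOCK.  A CONNECT_REQUEST event
 * allocates a new context for the incoming id before it is reported.
 */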

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
			      const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}
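
/*
 * Final teardown of a context: destroy the cm_id (no new events after
 * that), free its remaining multicast entries, purge events that were
 * never read, and return how many events the user did see.
 */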

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
						 ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
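
/*
 * RDMA_USER_CM_CMD_MIGRATE_ID: move a context, together with its
 * undelivered events (in order), from the file it currently belongs to
 * to the file issuing this command.
 */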

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}
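
/*
 * write() is the command path: userspace sends a struct rdma_ucm_cmd_hdr
 * (cmd, in, out) followed by hdr.in bytes of command payload, and
 * ucma_write() dispatches through ucma_cmd_table[hdr.cmd].
 */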

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);