/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
				      IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
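/*
 * Minor-number layout, for orientation (summarizing the constants above):
 * the first IB_UVERBS_NUM_FIXED_MINOR (32) devices live in the historic
 * static region starting at 231:192, and everything past that is carved
 * out of the dynamically allocated dynamic_uverbs_dev region. So devnum 3
 * maps to minor 195, while e.g. devnum 35 maps to dynamic_uverbs_dev + 3
 * (see the base computation in ib_uverbs_add_one()).
 */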
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     struct ib_device *ib_dev,
				     const char __user *buf, int in_len,
				     int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]		= ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE]	= ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_QUERY_PORT]		= ib_uverbs_query_port,
	[IB_USER_VERBS_CMD_ALLOC_PD]		= ib_uverbs_alloc_pd,
	[IB_USER_VERBS_CMD_DEALLOC_PD]		= ib_uverbs_dealloc_pd,
	[IB_USER_VERBS_CMD_REG_MR]		= ib_uverbs_reg_mr,
	[IB_USER_VERBS_CMD_REREG_MR]		= ib_uverbs_rereg_mr,
	[IB_USER_VERBS_CMD_DEREG_MR]		= ib_uverbs_dereg_mr,
	[IB_USER_VERBS_CMD_ALLOC_MW]		= ib_uverbs_alloc_mw,
	[IB_USER_VERBS_CMD_DEALLOC_MW]		= ib_uverbs_dealloc_mw,
	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL]	= ib_uverbs_create_comp_channel,
	[IB_USER_VERBS_CMD_CREATE_CQ]		= ib_uverbs_create_cq,
	[IB_USER_VERBS_CMD_RESIZE_CQ]		= ib_uverbs_resize_cq,
	[IB_USER_VERBS_CMD_POLL_CQ]		= ib_uverbs_poll_cq,
	[IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]	= ib_uverbs_req_notify_cq,
	[IB_USER_VERBS_CMD_DESTROY_CQ]		= ib_uverbs_destroy_cq,
	[IB_USER_VERBS_CMD_CREATE_QP]		= ib_uverbs_create_qp,
	[IB_USER_VERBS_CMD_QUERY_QP]		= ib_uverbs_query_qp,
	[IB_USER_VERBS_CMD_MODIFY_QP]		= ib_uverbs_modify_qp,
	[IB_USER_VERBS_CMD_DESTROY_QP]		= ib_uverbs_destroy_qp,
	[IB_USER_VERBS_CMD_POST_SEND]		= ib_uverbs_post_send,
	[IB_USER_VERBS_CMD_POST_RECV]		= ib_uverbs_post_recv,
	[IB_USER_VERBS_CMD_POST_SRQ_RECV]	= ib_uverbs_post_srq_recv,
	[IB_USER_VERBS_CMD_CREATE_AH]		= ib_uverbs_create_ah,
	[IB_USER_VERBS_CMD_DESTROY_AH]		= ib_uverbs_destroy_ah,
	[IB_USER_VERBS_CMD_ATTACH_MCAST]	= ib_uverbs_attach_mcast,
	[IB_USER_VERBS_CMD_DETACH_MCAST]	= ib_uverbs_detach_mcast,
	[IB_USER_VERBS_CMD_CREATE_SRQ]		= ib_uverbs_create_srq,
	[IB_USER_VERBS_CMD_MODIFY_SRQ]		= ib_uverbs_modify_srq,
	[IB_USER_VERBS_CMD_QUERY_SRQ]		= ib_uverbs_query_srq,
	[IB_USER_VERBS_CMD_DESTROY_SRQ]		= ib_uverbs_destroy_srq,
	[IB_USER_VERBS_CMD_OPEN_XRCD]		= ib_uverbs_open_xrcd,
	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
};
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
				    struct ib_device *ib_dev,
				    struct ib_udata *ucore,
				    struct ib_udata *uhw) = {
	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
	[IB_USER_VERBS_EX_CMD_CREATE_CQ]	= ib_uverbs_ex_create_cq,
	[IB_USER_VERBS_EX_CMD_CREATE_QP]	= ib_uverbs_ex_create_qp,
	[IB_USER_VERBS_EX_CMD_CREATE_WQ]	= ib_uverbs_ex_create_wq,
	[IB_USER_VERBS_EX_CMD_MODIFY_WQ]	= ib_uverbs_ex_modify_wq,
	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]	= ib_uverbs_ex_destroy_wq,
	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_MODIFY_QP]	= ib_uverbs_ex_modify_qp,
	[IB_USER_VERBS_EX_CMD_MODIFY_CQ]	= ib_uverbs_ex_modify_cq,
};
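/*
 * Both tables are indexed by the command number carried in struct
 * ib_uverbs_cmd_hdr. Designated initializers leave unlisted slots NULL,
 * so a NULL entry means "known to the ABI but not implemented here";
 * ib_uverbs_write() turns that into -EINVAL (legacy) or -ENOSYS
 * (extended) instead of dispatching.
 */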
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}
static void ib_uverbs_release_dev(struct kobject *kobj)
{
	struct ib_uverbs_device *dev =
		container_of(kobj, struct ib_uverbs_device, kobj);

	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static struct kobj_type ib_uverbs_dev_ktype = {
	.release = ib_uverbs_release_dev,
};
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
	struct ib_uverbs_async_event_file *file =
		container_of(ref, struct ib_uverbs_async_event_file, ref);

	kfree(file);
}
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj_file.uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
				      struct ib_ucontext *context,
				      bool device_removed)
{
	context->closing = 1;
	uverbs_cleanup_ucontext(context, device_removed);
	put_pid(context->tgid);

	ib_rdmacg_uncharge(&context->cg_obj, context->device,
			   RDMACG_RESOURCE_HCA_HANDLE);

	return context->device->dealloc_ucontext(context);
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	kobject_put(&file->device->kobj);
	kfree(file);
}
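/*
 * A note on lifetimes (editorial summary of the pattern used throughout
 * this file): file->device->ib_dev is protected by disassociate_srcu, so
 * readers may observe it as NULL once the underlying ib_device has been
 * hot-unplugged, while the uverbs file itself stays alive until its last
 * kref is dropped. The module reference taken in ib_uverbs_open() exists
 * only for drivers without disassociate_ucontext support, which is why it
 * is dropped above under the same condition.
 */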
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	ssize_t ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the NULL
			 * ib_dev set by device disassociation.
			 */
					      !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}
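/*
 * Illustration only (userspace side, not part of this file): both event
 * queues are drained with plain fixed-size read()s of the uapi
 * descriptors, e.g. one struct ib_uverbs_comp_event_desc per completion
 * channel wakeup:
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	if (read(channel_fd, &desc, sizeof(desc)) == sizeof(desc))
 *		handle_cq(desc.cq_handle);
 *
 * A buffer smaller than the descriptor fails with -EINVAL (the
 * "eventsz > count" check above); a disassociated device yields -EIO.
 */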
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}
static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj_file.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}
static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_completion_event_file *file = filp->private_data;
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}
const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};
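/*
 * uverbs_event_fops is deliberately non-static: completion channels are
 * anonymous-inode FDs created by the command path (the FD object type
 * registered in uverbs_std_types.c reuses this table), while the async
 * variant above is instantiated only by
 * ib_uverbs_alloc_async_event_file() below.
 */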
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue   *ev_queue = cq_context;
	struct ib_ucq_object	       *uobj;
	struct ib_uverbs_event	       *entry;
	unsigned long			flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
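/*
 * All of the per-object handlers above funnel into
 * ib_uverbs_async_handler(), which queues one fixed-size
 * ib_uverbs_async_event_desc on the file's single async FD. Unaffiliated
 * events (e.g. port state changes) arrive through this catch-all handler
 * with no per-object list or counter, hence the NULL, NULL arguments.
 */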
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed = 0;
	ev_queue->async_queue = NULL;
}
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At that point async file stuff was fully set */

	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}
static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
{
	u64 mask;

	if (command <= IB_USER_VERBS_CMD_OPEN_QP)
		mask = ib_dev->uverbs_cmd_mask;
	else
		mask = ib_dev->uverbs_ex_cmd_mask;

	if (mask & ((u64)1 << command))
		return 0;

	return -1;
}

static bool verify_command_idx(u32 command, bool extended)
{
	if (extended)
		return command < ARRAY_SIZE(uverbs_ex_cmd_table);

	return command < ARRAY_SIZE(uverbs_cmd_table);
}
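/*
 * Layout of the first __u32 of every write(), for orientation: the
 * command index sits in IB_USER_VERBS_CMD_COMMAND_MASK (the low byte) and
 * the flags in IB_USER_VERBS_CMD_FLAGS_MASK (the high byte), so an
 * extended command is framed by userspace as
 * (IB_USER_VERBS_CMD_FLAG_EXTENDED << IB_USER_VERBS_CMD_FLAGS_SHIFT) |
 * IB_USER_VERBS_EX_CMD_..., which ib_uverbs_write() below picks apart.
 */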
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
	bool extended_command;
	__u32 command;
	__u32 flags;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof hdr)
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof hdr))
		return -EFAULT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
				   IB_USER_VERBS_CMD_COMMAND_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
	flags = (hdr.command &
		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;

	extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
	if (!verify_command_idx(command, extended_command)) {
		ret = -EINVAL;
		goto out;
	}

	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!file->ucontext &&
	    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
		ret = -EINVAL;
		goto out;
	}

	if (!flags) {
		if (!uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}

		if (hdr.in_words * 4 != count) {
			ret = -EINVAL;
			goto out;
		}

		ret = uverbs_cmd_table[command](file, ib_dev,
						buf + sizeof(hdr),
						hdr.in_words * 4,
						hdr.out_words * 4);
	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
		struct ib_uverbs_ex_cmd_hdr ex_hdr;
		struct ib_udata ucore;
		struct ib_udata uhw;
		size_t written_count = count;

		if (!uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}

		if (!file->ucontext) {
			ret = -EINVAL;
			goto out;
		}

		if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
			ret = -EINVAL;
			goto out;
		}

		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
			ret = -EFAULT;
			goto out;
		}

		count -= sizeof(hdr) + sizeof(ex_hdr);
		buf += sizeof(hdr) + sizeof(ex_hdr);

		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.cmd_hdr_reserved) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.response) {
			if (!hdr.out_words && !ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}

			if (!access_ok(VERIFY_WRITE,
				       u64_to_user_ptr(ex_hdr.response),
				       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (hdr.out_words || ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}
		}

		ib_uverbs_init_udata_buf_or_null(&ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(&uhw,
					buf + ucore.inlen,
					u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
					ex_hdr.provider_in_words * 8,
					ex_hdr.provider_out_words * 8);

		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
		if (!ret)
			ret = written_count;
	} else {
		ret = -ENOSYS;
	}

out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
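/*
 * Illustration only (userspace side, assuming the uapi structs from
 * <rdma/ib_user_verbs.h>): a legacy command is a single write() of header
 * plus payload, with in_words counting the whole buffer in 32-bit words,
 * since ib_uverbs_write() insists that count == hdr.in_words * 4:
 *
 *	struct {
 *		struct ib_uverbs_cmd_hdr     hdr;
 *		struct ib_uverbs_get_context cmd;
 *	} req = {
 *		.hdr = {
 *			.command   = IB_USER_VERBS_CMD_GET_CONTEXT,
 *			.in_words  = sizeof(req) / 4,
 *			.out_words = sizeof(resp) / 4,
 *		},
 *		.cmd = { .response = (__u64)(uintptr_t)&resp },
 *	};
 *
 *	if (write(fd, &req, sizeof(req)) != sizeof(req))
 *		...
 *
 * where resp is a struct ib_uverbs_get_context_resp the kernel fills in.
 */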
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (!file->ucontext)
		ret = -ENODEV;
	else
		ret = ib_dev->mmap(file->ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately return -ENXIO, or all the
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	spin_lock_init(&file->idr_lock);
	idr_init(&file->idr);
	file->ucontext	 = NULL;
	file->async_file = NULL;
	kref_init(&file->ref);
	mutex_init(&file->mutex);
	mutex_init(&file->cleanup_mutex);

	filp->private_data = file;
	kobject_get(&dev->kobj);
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	mutex_lock(&file->cleanup_mutex);
	if (file->ucontext) {
		ib_uverbs_cleanup_ucontext(file, file->ucontext, false);
		file->ucontext = NULL;
	}
	mutex_unlock(&file->cleanup_mutex);
	idr_destroy(&file->idr);

	mutex_lock(&file->device->lists_mutex);
	if (!file->is_closed) {
		list_del(&file->list);
		file->is_closed = 1;
	}
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}
static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
#endif
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
#endif
};
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
show_ibdev(struct device
*device
, struct device_attribute
*attr
,
986 struct ib_uverbs_device
*dev
= dev_get_drvdata(device
);
987 struct ib_device
*ib_dev
;
992 srcu_key
= srcu_read_lock(&dev
->disassociate_srcu
);
993 ib_dev
= srcu_dereference(dev
->ib_dev
, &dev
->disassociate_srcu
);
995 ret
= sprintf(buf
, "%s\n", ib_dev
->name
);
996 srcu_read_unlock(&dev
->disassociate_srcu
, srcu_key
);
1000 static DEVICE_ATTR(ibdev
, S_IRUGO
, show_ibdev
, NULL
);
static ssize_t show_dev_abi_version(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	if (!dev)
		return -ENODEV;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));
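/*
 * Usage sketch (from a shell, values illustrative): these attributes let
 * userspace map a uverbs char device back to its IB device and check ABI
 * compatibility before issuing commands:
 *
 *	$ cat /sys/class/infiniband_verbs/uverbs0/ibdev
 *	mlx5_0
 *	$ cat /sys/class/infiniband_verbs/uverbs0/abi_version
 *	6
 */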
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);

	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
	if (devnum >= IB_UVERBS_MAX_DEVICES)
		goto err;
	uverbs_dev->devnum = devnum;
	set_bit(devnum, dev_map);
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	cdev_init(&uverbs_dev->cdev, NULL);
	uverbs_dev->cdev.owner = THIS_MODULE;
	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
	cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
	if (cdev_add(&uverbs_dev->cdev, base, 1))
		goto err_cdev;

	uverbs_dev->dev = device_create(uverbs_class, device->dev.parent,
					uverbs_dev->cdev.dev, uverbs_dev,
					"uverbs%d", uverbs_dev->devnum);
	if (IS_ERR(uverbs_dev->dev))
		goto err_cdev;

	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
		goto err_class;

	if (!device->specs_root) {
		const struct uverbs_object_tree_def *default_root[] = {
			uverbs_default_get_objects()};

		uverbs_dev->specs_root = uverbs_alloc_spec_tree(1,
								default_root);
		if (IS_ERR(uverbs_dev->specs_root))
			goto err_class;

		device->specs_root = uverbs_dev->specs_root;
	}

	ib_set_client_data(device, &uverbs_client, uverbs_dev);

	return;

err_class:
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
	cdev_del(&uverbs_dev->cdev);
	clear_bit(devnum, dev_map);

err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
	return;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	synchronize_srcu(&uverbs_dev->disassociate_srcu);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		struct ib_ucontext *ucontext;
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		file->is_closed = 1;
		list_del(&file->list);
		kref_get(&file->ref);
		mutex_unlock(&uverbs_dev->lists_mutex);

		mutex_lock(&file->cleanup_mutex);
		ucontext = file->ucontext;
		file->ucontext = NULL;
		mutex_unlock(&file->cleanup_mutex);

		/* At this point ib_uverbs_close cannot be running
		 * ib_uverbs_cleanup_ucontext
		 */
		if (ucontext) {
			/* We must release the mutex before going ahead and
			 * calling disassociate_ucontext. disassociate_ucontext
			 * might end up indirectly calling uverbs_close,
			 * for example due to freeing the resources
			 * (e.g mmput).
			 */
			ib_uverbs_event_handler(&file->event_handler, &event);
			ib_dev->disassociate_ucontext(ucontext);
			mutex_lock(&file->cleanup_mutex);
			ib_uverbs_cleanup_ucontext(file, ucontext, true);
			mutex_unlock(&file->cleanup_mutex);
		}

		mutex_lock(&uverbs_dev->lists_mutex);
		kref_put(&file->ref, ib_uverbs_release_file);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device =
			NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	dev_set_drvdata(uverbs_dev->dev, NULL);
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
	cdev_del(&uverbs_dev->cdev);
	clear_bit(uverbs_dev->devnum, dev_map);

	if (device->disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see a EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid anymore.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);
	if (uverbs_dev->specs_root) {
		uverbs_free_spec_tree(uverbs_dev->specs_root);
		device->specs_root = NULL;
	}

	kobject_put(&uverbs_dev->kobj);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
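/*
 * With this devnode hook, the class device "uverbsN" appears as
 * /dev/infiniband/uverbsN with mode 0666; per-command validation in
 * ib_uverbs_write() (not file permissions) is what gates what a caller
 * may actually do.
 */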
static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}
module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);