/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>

#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UVERBS_MAJOR	      = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = 32
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
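/*
 * For illustration (not part of the original file): with the constants
 * above, the primary character-device range is major 231, minors
 * 192..223, so the Nth uverbs device ("uverbsN") lives at
 *
 *	dev_t devt = MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR + N);
 *
 * Devices beyond IB_UVERBS_MAX_DEVICES spill into a dynamically
 * allocated major; see find_overflow_devnum() below.
 */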
static struct class *uverbs_class;

DEFINE_SPINLOCK(ib_uverbs_idr_lock);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
DEFINE_IDR(ib_uverbs_rule_idr);
DEFINE_IDR(ib_uverbs_wq_idr);
DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);

static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
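/*
 * Illustrative sketch (assumption: the real lookup helpers live in
 * uverbs_cmd.c): each IDR above maps a userspace handle to its kernel
 * ib_uobject, with ib_uverbs_idr_lock serializing the table accesses:
 *
 *	spin_lock(&ib_uverbs_idr_lock);
 *	uobj = idr_find(&ib_uverbs_qp_idr, handle);
 *	spin_unlock(&ib_uverbs_idr_lock);
 */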
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     struct ib_device *ib_dev,
				     const char __user *buf, int in_len,
				     int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]		= ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE]	= ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_QUERY_PORT]		= ib_uverbs_query_port,
	[IB_USER_VERBS_CMD_ALLOC_PD]		= ib_uverbs_alloc_pd,
	[IB_USER_VERBS_CMD_DEALLOC_PD]		= ib_uverbs_dealloc_pd,
	[IB_USER_VERBS_CMD_REG_MR]		= ib_uverbs_reg_mr,
	[IB_USER_VERBS_CMD_REREG_MR]		= ib_uverbs_rereg_mr,
	[IB_USER_VERBS_CMD_DEREG_MR]		= ib_uverbs_dereg_mr,
	[IB_USER_VERBS_CMD_ALLOC_MW]		= ib_uverbs_alloc_mw,
	[IB_USER_VERBS_CMD_DEALLOC_MW]		= ib_uverbs_dealloc_mw,
	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL]	= ib_uverbs_create_comp_channel,
	[IB_USER_VERBS_CMD_CREATE_CQ]		= ib_uverbs_create_cq,
	[IB_USER_VERBS_CMD_RESIZE_CQ]		= ib_uverbs_resize_cq,
	[IB_USER_VERBS_CMD_POLL_CQ]		= ib_uverbs_poll_cq,
	[IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]	= ib_uverbs_req_notify_cq,
	[IB_USER_VERBS_CMD_DESTROY_CQ]		= ib_uverbs_destroy_cq,
	[IB_USER_VERBS_CMD_CREATE_QP]		= ib_uverbs_create_qp,
	[IB_USER_VERBS_CMD_QUERY_QP]		= ib_uverbs_query_qp,
	[IB_USER_VERBS_CMD_MODIFY_QP]		= ib_uverbs_modify_qp,
	[IB_USER_VERBS_CMD_DESTROY_QP]		= ib_uverbs_destroy_qp,
	[IB_USER_VERBS_CMD_POST_SEND]		= ib_uverbs_post_send,
	[IB_USER_VERBS_CMD_POST_RECV]		= ib_uverbs_post_recv,
	[IB_USER_VERBS_CMD_POST_SRQ_RECV]	= ib_uverbs_post_srq_recv,
	[IB_USER_VERBS_CMD_CREATE_AH]		= ib_uverbs_create_ah,
	[IB_USER_VERBS_CMD_DESTROY_AH]		= ib_uverbs_destroy_ah,
	[IB_USER_VERBS_CMD_ATTACH_MCAST]	= ib_uverbs_attach_mcast,
	[IB_USER_VERBS_CMD_DETACH_MCAST]	= ib_uverbs_detach_mcast,
	[IB_USER_VERBS_CMD_CREATE_SRQ]		= ib_uverbs_create_srq,
	[IB_USER_VERBS_CMD_MODIFY_SRQ]		= ib_uverbs_modify_srq,
	[IB_USER_VERBS_CMD_QUERY_SRQ]		= ib_uverbs_query_srq,
	[IB_USER_VERBS_CMD_DESTROY_SRQ]		= ib_uverbs_destroy_srq,
	[IB_USER_VERBS_CMD_OPEN_XRCD]		= ib_uverbs_open_xrcd,
	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
};
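/*
 * Sketch of how ib_uverbs_write() (below) dispatches through this table
 * once the command header has been validated:
 *
 *	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
 *	ret = uverbs_cmd_table[command](file, ib_dev, buf + sizeof(hdr),
 *					hdr.in_words * 4, hdr.out_words * 4);
 */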
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
				    struct ib_device *ib_dev,
				    struct ib_udata *ucore,
				    struct ib_udata *uhw) = {
	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
	[IB_USER_VERBS_EX_CMD_CREATE_CQ]	= ib_uverbs_ex_create_cq,
	[IB_USER_VERBS_EX_CMD_CREATE_QP]	= ib_uverbs_ex_create_qp,
	[IB_USER_VERBS_EX_CMD_CREATE_WQ]	= ib_uverbs_ex_create_wq,
	[IB_USER_VERBS_EX_CMD_MODIFY_WQ]	= ib_uverbs_ex_modify_wq,
	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]	= ib_uverbs_ex_destroy_wq,
	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL]  = ib_uverbs_ex_create_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_MODIFY_QP]	= ib_uverbs_ex_modify_qp,
};
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
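/*
 * Note: uverbs_dealloc_mw() above drops the PD reference taken when the
 * MW was allocated; the PD's usecnt must reach zero before the PD itself
 * can be deallocated.
 */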
static void ib_uverbs_release_dev(struct kobject *kobj)
{
	struct ib_uverbs_device *dev =
		container_of(kobj, struct ib_uverbs_device, kobj);

	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

static struct kobj_type ib_uverbs_dev_ktype = {
	.release = ib_uverbs_release_dev,
};
static void ib_uverbs_release_event_file(struct kref *ref)
{
	struct ib_uverbs_event_file *file =
		container_of(ref, struct ib_uverbs_event_file, ref);

	kfree(file);
}
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);

		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
static void ib_uverbs_detach_umcast(struct ib_qp *qp,
				    struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
				      struct ib_ucontext *context)
{
	struct ib_uobject *uobj, *tmp;

	context->closing = 1;

	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
		struct ib_ah *ah = uobj->object;

		idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
		ib_destroy_ah(ah);
		kfree(uobj);
	}

	/* Remove MWs before QPs, in order to support type 2A MWs. */
	list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
		struct ib_mw *mw = uobj->object;

		idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
		uverbs_dealloc_mw(mw);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
		struct ib_flow *flow_id = uobj->object;

		idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
		ib_destroy_flow(flow_id);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
		struct ib_qp *qp = uobj->object;
		struct ib_uqp_object *uqp =
			container_of(uobj, struct ib_uqp_object, uevent.uobject);

		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
		if (qp == qp->real_qp)
			ib_uverbs_detach_umcast(qp, uqp);
		ib_destroy_qp(qp);
		ib_uverbs_release_uevent(file, &uqp->uevent);
		kfree(uqp);
	}

	list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
		struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
		struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;

		idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
		ib_destroy_rwq_ind_table(rwq_ind_tbl);
		kfree(ind_tbl);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
		struct ib_wq *wq = uobj->object;
		struct ib_uwq_object *uwq =
			container_of(uobj, struct ib_uwq_object, uevent.uobject);

		idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
		ib_destroy_wq(wq);
		ib_uverbs_release_uevent(file, &uwq->uevent);
		kfree(uwq);
	}

	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
		struct ib_srq *srq = uobj->object;
		struct ib_uevent_object *uevent =
			container_of(uobj, struct ib_uevent_object, uobject);

		idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
		ib_destroy_srq(srq);
		ib_uverbs_release_uevent(file, uevent);
		kfree(uevent);
	}

	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
		struct ib_cq *cq = uobj->object;
		struct ib_uverbs_event_file *ev_file = cq->cq_context;
		struct ib_ucq_object *ucq =
			container_of(uobj, struct ib_ucq_object, uobject);

		idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
		ib_destroy_cq(cq);
		ib_uverbs_release_ucq(file, ev_file, ucq);
		kfree(ucq);
	}

	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
		struct ib_mr *mr = uobj->object;

		idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
		ib_dereg_mr(mr);
		kfree(uobj);
	}

	mutex_lock(&file->device->xrcd_tree_mutex);
	list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
		struct ib_xrcd *xrcd = uobj->object;
		struct ib_uxrcd_object *uxrcd =
			container_of(uobj, struct ib_uxrcd_object, uobject);

		idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
		ib_uverbs_dealloc_xrcd(file->device, xrcd);
		kfree(uxrcd);
	}
	mutex_unlock(&file->device->xrcd_tree_mutex);

	list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
		struct ib_pd *pd = uobj->object;

		idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
		ib_dealloc_pd(pd);
		kfree(uobj);
	}

	put_pid(context->tgid);

	return context->device->dealloc_ucontext(context);
}
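/*
 * Note on ordering in ib_uverbs_cleanup_ucontext() above: objects are
 * destroyed dependents-first so nothing is freed while another object
 * still references it (MWs before QPs for type 2A MWs, QPs and SRQs
 * before the CQs they use, and the PDs last of all).
 */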
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
static void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	kfree(file);
}
static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *event;
	int eventsz;
	int ret = 0;

	spin_lock_irq(&file->lock);

	while (list_empty(&file->event_list)) {
		spin_unlock_irq(&file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     (!list_empty(&file->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					     !file->uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If the device was disassociated and no event exists, set an error */
		if (list_empty(&file->event_list) &&
		    !file->uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&file->lock);
	}

	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);

	if (file->is_async)
		eventsz = sizeof(struct ib_uverbs_async_event_desc);
	else
		eventsz = sizeof(struct ib_uverbs_comp_event_desc);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(file->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&file->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}
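/*
 * A minimal sketch of the userspace side of this read path, assuming a
 * completion-channel fd obtained via CREATE_COMP_CHANNEL (descriptor
 * layout from <rdma/ib_user_verbs.h>):
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	if (read(comp_fd, &desc, sizeof(desc)) == sizeof(desc))
 *		;	// desc.cq_handle names the CQ with a new completion
 */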
static unsigned int ib_uverbs_event_poll(struct file *filp,
					 struct poll_table_struct *wait)
{
	unsigned int pollflags = 0;
	struct ib_uverbs_event_file *file = filp->private_data;

	poll_wait(filp, &file->poll_wait, wait);

	spin_lock_irq(&file->lock);
	if (!list_empty(&file->event_list))
		pollflags = POLLIN | POLLRDNORM;
	spin_unlock_irq(&file->lock);

	return pollflags;
}
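/*
 * Userspace typically waits on the event fd with poll(2); a sketch:
 *
 *	struct pollfd pfd = { .fd = comp_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// wakes once an event has been queued
 */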
static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_file *file = filp->private_data;

	return fasync_helper(fd, filp, on, &file->async_queue);
}
static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&file->uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->lock);
	closed_already = file->is_closed;
	file->is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->lock);
	if (!closed_already) {
		list_del(&file->list);
		if (file->is_async)
			ib_unregister_event_handler(&file->uverbs_file->
						    event_handler);
	}
	mutex_unlock(&file->uverbs_file->device->lists_mutex);

	kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_event_file);

	return 0;
}
static const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_event_read,
	.poll    = ib_uverbs_event_poll,
	.release = ib_uverbs_event_close,
	.fasync  = ib_uverbs_event_fasync,
	.llseek	 = no_llseek,
};
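/*
 * These fops back the anonymous inodes handed out by
 * ib_uverbs_alloc_event_file() below.  Comparing a file's f_op against
 * &uverbs_event_fops is also how ib_uverbs_lookup_comp_file() decides
 * whether an fd really is a completion channel.
 */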
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_file *file = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!file)
		return;

	spin_lock_irqsave(&file->lock, flags);
	if (file->is_closed) {
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &file->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&file->lock, flags);

	wake_up_interruptible(&file->poll_wait);
	kill_fasync(&file->async_queue, SIGIO, POLL_IN);
}
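/*
 * ib_uverbs_comp_handler() above is called from the driver's completion
 * interrupt path, which is why the event is allocated with GFP_ATOMIC
 * while the event file's lock is held with interrupts saved.
 */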
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->lock, flags);
	if (file->async_file->is_closed) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter		     = counter;

	list_add_tail(&entry->list, &file->async_file->event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->lock, flags);

	wake_up_interruptible(&file->async_file->poll_wait);
	kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject || !event->element.qp->uobject->live)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
	file->async_file = NULL;
}
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
					struct ib_device *ib_dev,
					int is_async)
{
	struct ib_uverbs_event_file *ev_file;
	struct file *filp;
	int ret;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	kref_init(&ev_file->ref);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	ev_file->async_queue = NULL;
	ev_file->is_closed   = 0;

	filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	if (is_async) {
		WARN_ON(uverbs_file->async_file);
		uverbs_file->async_file = ev_file;
		kref_get(&uverbs_file->async_file->ref);
		INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
				      ib_dev,
				      ib_uverbs_event_handler);
		ret = ib_register_event_handler(&uverbs_file->event_handler);
		if (ret)
			goto err_put_file;

		/* At this point the async event file is fully set up */
		ev_file->is_async = 1;
	}

	return filp;

err_put_file:
	fput(filp);
	kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
	uverbs_file->async_file = NULL;
	return ERR_PTR(ret);

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	return filp;
}
/*
 * Look up a completion event file by FD.  If lookup is successful,
 * takes a ref to the event file struct that it returns; if
 * unsuccessful, returns NULL.
 */
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
{
	struct ib_uverbs_event_file *ev_file = NULL;
	struct fd f = fdget(fd);

	if (!f.file)
		return NULL;

	if (f.file->f_op != &uverbs_event_fops)
		goto out;

	ev_file = f.file->private_data;
	if (ev_file->is_async) {
		ev_file = NULL;
		goto out;
	}

	kref_get(&ev_file->ref);

out:
	fdput(f);
	return ev_file;
}
static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
{
	u64 mask;

	if (command <= IB_USER_VERBS_CMD_OPEN_QP)
		mask = ib_dev->uverbs_cmd_mask;
	else
		mask = ib_dev->uverbs_ex_cmd_mask;

	if (mask & ((u64)1 << command))
		return 0;

	return -1;
}
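/*
 * Example: a device that implements REG_MR sets the matching bit in
 * uverbs_cmd_mask, so for that command the check above reduces to
 *
 *	mask & (1ULL << IB_USER_VERBS_CMD_REG_MR)
 *
 * Commands above IB_USER_VERBS_CMD_OPEN_QP are checked against
 * uverbs_ex_cmd_mask instead.
 */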
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
	__u32 command;
	__u32 flags;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof hdr)
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof hdr))
		return -EFAULT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
				   IB_USER_VERBS_CMD_COMMAND_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!file->ucontext &&
	    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
		ret = -EINVAL;
		goto out;
	}

	flags = (hdr.command &
		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;

	if (!flags) {
		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
		    !uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}

		if (hdr.in_words * 4 != count) {
			ret = -EINVAL;
			goto out;
		}

		ret = uverbs_cmd_table[command](file, ib_dev,
						buf + sizeof(hdr),
						hdr.in_words * 4,
						hdr.out_words * 4);

	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
		struct ib_uverbs_ex_cmd_hdr ex_hdr;
		struct ib_udata ucore;
		struct ib_udata uhw;
		size_t written_count = count;

		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
		    !uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}

		if (!file->ucontext) {
			ret = -EINVAL;
			goto out;
		}

		if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
			ret = -EINVAL;
			goto out;
		}

		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
			ret = -EFAULT;
			goto out;
		}

		count -= sizeof(hdr) + sizeof(ex_hdr);
		buf += sizeof(hdr) + sizeof(ex_hdr);

		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.cmd_hdr_reserved) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.response) {
			if (!hdr.out_words && !ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}

			if (!access_ok(VERIFY_WRITE,
				       (void __user *) (unsigned long) ex_hdr.response,
				       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (hdr.out_words || ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}
		}

		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
				       hdr.in_words * 8, hdr.out_words * 8);

		INIT_UDATA_BUF_OR_NULL(&uhw,
				       buf + ucore.inlen,
				       (unsigned long) ex_hdr.response + ucore.outlen,
				       ex_hdr.provider_in_words * 8,
				       ex_hdr.provider_out_words * 8);

		ret = uverbs_ex_cmd_table[command](file,
						   ib_dev,
						   &ucore,
						   &uhw);
		if (!ret)
			ret = written_count;
	} else {
		ret = -ENOSYS;
	}

out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	return ret;
}
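/*
 * ABI recap, as enforced above: a plain command is a single
 *
 *	write(fd, buf, count)	with  count == hdr.in_words * 4
 *
 * where buf begins with struct ib_uverbs_cmd_hdr.  An extended command
 * carries a second header and must satisfy
 *
 *	count == sizeof(hdr) + sizeof(ex_hdr) +
 *		 (hdr.in_words + ex_hdr.provider_in_words) * 8
 *
 * with any response buffer described by ex_hdr.response.
 */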
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (!file->ucontext)
		ret = -ENODEV;
	else
		ret = ib_dev->mmap(file->ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case the IB device supports disassociate_ucontext, there is no
	 * hard dependency between the uverbs device and its low-level device.
	 */
	module_dependent = !(ib_dev->disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	file->ucontext	 = NULL;
	file->async_file = NULL;
	kref_init(&file->ref);
	mutex_init(&file->mutex);
	mutex_init(&file->cleanup_mutex);

	filp->private_data = file;
	kobject_get(&dev->kobj);
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_device *dev = file->device;

	mutex_lock(&file->cleanup_mutex);
	if (file->ucontext) {
		ib_uverbs_cleanup_ucontext(file, file->ucontext);
		file->ucontext = NULL;
	}
	mutex_unlock(&file->cleanup_mutex);

	mutex_lock(&file->device->lists_mutex);
	if (!file->is_closed) {
		list_del(&file->list);
		file->is_closed = 1;
	}
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);
	kobject_put(&dev->kobj);

	return 0;
}
static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	if (!dev)
		return -ENODEV;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", ib_dev->name);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static ssize_t show_dev_abi_version(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	if (!dev)
		return -ENODEV;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);

/*
 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
 * requesting a new major number and doubling the number of max devices we
 * support. It's stupid, but simple.
 */
static int find_overflow_devnum(void)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
					  "infiniband_verbs");
		if (ret) {
			pr_err("user_verbs: couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
	if (ret >= IB_UVERBS_MAX_DEVICES)
		return -1;

	return ret;
}
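/*
 * Example: the 33rd device exhausts the static minor range, so
 * ib_uverbs_add_one() below takes bit 0 of overflow_map, registers the
 * char device at minor 0 of the dynamic major, and names it "uverbs32"
 * (devnum + IB_UVERBS_MAX_DEVICES).
 */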
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);

	spin_lock(&map_lock);
	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
	if (devnum >= IB_UVERBS_MAX_DEVICES) {
		spin_unlock(&map_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			goto err;

		spin_lock(&map_lock);
		uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		uverbs_dev->devnum = devnum;
		base = devnum + IB_UVERBS_BASE_DEV;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&map_lock);

	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	cdev_init(&uverbs_dev->cdev, NULL);
	uverbs_dev->cdev.owner = THIS_MODULE;
	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
	if (cdev_add(&uverbs_dev->cdev, base, 1))
		goto err_cdev;

	uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
					uverbs_dev->cdev.dev, uverbs_dev,
					"uverbs%d", uverbs_dev->devnum);
	if (IS_ERR(uverbs_dev->dev))
		goto err_cdev;

	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
		goto err_class;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);

	return;

err_class:
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
	cdev_del(&uverbs_dev->cdev);
	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
	return;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_event_file *event_file;
	struct ib_event event;

	/* Wait for any pending/running commands to terminate */
	synchronize_srcu(&uverbs_dev->disassociate_srcu);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		struct ib_ucontext *ucontext;
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		file->is_closed = 1;
		list_del(&file->list);
		kref_get(&file->ref);
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);

		mutex_lock(&file->cleanup_mutex);
		ucontext = file->ucontext;
		file->ucontext = NULL;
		mutex_unlock(&file->cleanup_mutex);

		/* At this point ib_uverbs_close cannot be running
		 * ib_uverbs_cleanup_ucontext
		 */
		if (ucontext) {
			/* We must release the mutex before going ahead and
			 * calling disassociate_ucontext. disassociate_ucontext
			 * might end up indirectly calling uverbs_close,
			 * for example due to freeing the resources
			 * (e.g. mmput).
			 */
			ib_dev->disassociate_ucontext(ucontext);
			ib_uverbs_cleanup_ucontext(file, ucontext);
		}

		mutex_lock(&uverbs_dev->lists_mutex);
		kref_put(&file->ref, ib_uverbs_release_file);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_event_file,
					      list);
		spin_lock_irq(&event_file->lock);
		event_file->is_closed = 1;
		spin_unlock_irq(&event_file->lock);

		list_del(&event_file->list);
		if (event_file->is_async) {
			ib_unregister_event_handler(&event_file->uverbs_file->
						    event_handler);
			event_file->uverbs_file->event_handler.device = NULL;
		}

		wake_up_interruptible(&event_file->poll_wait);
		kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	dev_set_drvdata(uverbs_dev->dev, NULL);
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
	cdev_del(&uverbs_dev->cdev);

	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(uverbs_dev->devnum, dev_map);
	else
		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

	if (device->disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an -EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
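/*
 * With the devnode callback above, udev creates the nodes under
 * /dev/infiniband/ (e.g. /dev/infiniband/uverbs0) with mode 0666.
 */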
static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);

out:
	return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
	idr_destroy(&ib_uverbs_pd_idr);
	idr_destroy(&ib_uverbs_mr_idr);
	idr_destroy(&ib_uverbs_mw_idr);
	idr_destroy(&ib_uverbs_ah_idr);
	idr_destroy(&ib_uverbs_cq_idr);
	idr_destroy(&ib_uverbs_qp_idr);
	idr_destroy(&ib_uverbs_srq_idr);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);