/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
        IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
        IB_UVERBS_NUM_FIXED_MINOR = 32,
        IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
                                      IB_UVERBS_NUM_FIXED_MINOR,
};
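/*
 * Worked example of the minor-number split (derived from the constants
 * above): the first 32 devices use the statically registered region
 * 231:192 through 231:223, so devnum 5 maps to minor 197; devices 32 and
 * up fall into the dynamically allocated dynamic_uverbs_dev region.
 */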
#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
        /*
         * We do not hold the hw_destroy_rwsem lock for this flow, instead
         * srcu is used. It does not matter if someone races this with
         * get_context, we get NULL or valid ucontext.
         */
        struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

        if (!srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu))
                return ERR_PTR(-EIO);

        if (!ucontext)
                return ERR_PTR(-EINVAL);

        return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
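/*
 * Illustrative caller pattern (a sketch mirroring ib_uverbs_mmap() below):
 * the SRCU read side must span the entire use of the returned ucontext.
 *
 *      srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *      ucontext = ib_uverbs_get_ucontext_file(ufile);
 *      if (!IS_ERR(ucontext))
 *              ...use ucontext...
 *      srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 */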
int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->ops.dealloc_mw(mw);
        if (ret)
                return ret;

        atomic_dec(&pd->usecnt);
        kfree(mw);
        return ret;
}
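/*
 * Note: the PD usecnt taken at MW allocation is only dropped once the
 * driver's dealloc_mw() has succeeded, so a failed destroy leaves the
 * reference accounting intact.
 */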
static void ib_uverbs_release_dev(struct device *device)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);

        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
        mutex_destroy(&dev->lists_mutex);
        mutex_destroy(&dev->xrcd_tree_mutex);
        kfree(dev);
}
void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->ev_queue.lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->ev_queue.lock);

                uverbs_uobject_put(&ev_file->uobj);
        }

        ib_uverbs_release_uevent(&uobj->uevent);
}
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
        struct ib_uverbs_async_event_file *async_file = uobj->event_file;
        struct ib_uverbs_event *evt, *tmp;

        if (!async_file)
                return;

        spin_lock_irq(&async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&async_file->ev_queue.lock);
        uverbs_uobject_put(&async_file->uobj);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
                             struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}
void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        release_ufile_idr_uobject(file);

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->ops.disassociate_ucontext)
                module_put(ib_dev->ops.owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        if (file->default_async_file)
                uverbs_uobject_put(&file->default_async_file->uobj);
        put_device(&file->device->dev);

        if (file->disassociate_page)
                __free_pages(file->disassociate_page, 0);
        mutex_destroy(&file->umap_lock);
        mutex_destroy(&file->ucontext_lock);
        kfree(file);
}
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
                                    struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos,
                                    size_t eventsz)
{
        struct ib_uverbs_event *event;
        int ret = 0;

        spin_lock_irq(&ev_queue->lock);

        while (list_empty(&ev_queue->event_list)) {
                spin_unlock_irq(&ev_queue->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(ev_queue->poll_wait,
                                             (!list_empty(&ev_queue->event_list) ||
                                              ev_queue->is_closed)))
                        return -ERESTARTSYS;

                spin_lock_irq(&ev_queue->lock);

                /* If device was disassociated and no event exists set an error */
                if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
                        spin_unlock_irq(&ev_queue->lock);
                        return -EIO;
                }
        }

        event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(ev_queue->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&ev_queue->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}
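/*
 * Userspace consumes these queues with plain read(2), one fixed-size
 * descriptor per call; a buffer smaller than eventsz fails with -EINVAL
 * per the check above. A minimal sketch (illustrative only, the fd being
 * obtained through the usual rdma-core paths):
 *
 *      struct ib_uverbs_async_event_desc ev;
 *
 *      if (read(async_fd, &ev, sizeof(ev)) == sizeof(ev))
 *              handle_event(ev.element, ev.event_type);
 */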
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
                                          size_t count, loff_t *pos)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
                                    sizeof(struct ib_uverbs_async_event_desc));
}
static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
                                         size_t count, loff_t *pos)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
                                    pos,
                                    sizeof(struct ib_uverbs_comp_event_desc));
}
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
                                     struct file *filp,
                                     struct poll_table_struct *wait)
{
        __poll_t pollflags = 0;

        poll_wait(filp, &ev_queue->poll_wait, wait);

        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
                pollflags = EPOLLIN | EPOLLRDNORM;
        else if (ev_queue->is_closed)
                pollflags = EPOLLERR;
        spin_unlock_irq(&ev_queue->lock);

        return pollflags;
}
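/*
 * Poll semantics: EPOLLIN | EPOLLRDNORM once an event is queued, and
 * EPOLLERR after the queue is closed by device disassociation, which is
 * how a polling userspace learns the underlying device is gone.
 */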
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
                                           struct poll_table_struct *wait)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}
static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
                                          struct poll_table_struct *wait)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}
static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_comp_event_read,
        .poll    = ib_uverbs_comp_event_poll,
        .release = uverbs_uobject_fd_release,
        .fasync  = ib_uverbs_comp_event_fasync,
        .llseek  = no_llseek,
};
const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
        .release = uverbs_async_event_release,
        .fasync  = ib_uverbs_async_event_fasync,
        .llseek  = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_queue *ev_queue = cq_context;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!ev_queue)
                return;

        spin_lock_irqsave(&ev_queue->lock, flags);
        if (ev_queue->is_closed) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        uobj = cq->uobject;

        entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &ev_queue->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&ev_queue->lock, flags);

        wake_up_interruptible(&ev_queue->poll_wait);
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
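/*
 * The completion handler can be invoked from (soft)irq context by the
 * driver, hence the irqsave locking and the GFP_ATOMIC allocation above;
 * on allocation failure the event is simply dropped.
 */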
void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
                             __u64 element, __u64 event,
                             struct list_head *obj_list, u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!async_file)
                return;

        spin_lock_irqsave(&async_file->ev_queue.lock, flags);
        if (async_file->ev_queue.is_closed) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry->desc.async.element = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved = 0;
        entry->counter = counter;

        list_add_tail(&entry->list, &async_file->ev_queue.event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);

        wake_up_interruptible(&async_file->ev_queue.poll_wait);
        kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
                              struct ib_event *event)
{
        ib_uverbs_async_handler(eobj->event_file,
                                eobj->uobject.user_handle, event->event,
                                &eobj->event_list, &eobj->events_reported);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        /* for XRC target qp's, check that qp is live */
        if (!event->element.qp->uobject)
                return;

        uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
}
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}
static void ib_uverbs_event_handler(struct ib_event_handler *handler,
                                    struct ib_event *event)
{
        ib_uverbs_async_handler(
                container_of(handler, struct ib_uverbs_async_event_file,
                             event_handler),
                event->element.port_num, event->event, NULL, NULL);
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
        spin_lock_init(&ev_queue->lock);
        INIT_LIST_HEAD(&ev_queue->event_list);
        init_waitqueue_head(&ev_queue->poll_wait);
        ev_queue->is_closed   = 0;
        ev_queue->async_queue = NULL;
}
void ib_uverbs_init_async_event_file(
        struct ib_uverbs_async_event_file *async_file)
{
        struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
        struct ib_device *ib_dev = async_file->uobj.context->device;

        ib_uverbs_init_event_queue(&async_file->ev_queue);

        /* The first async_event_file becomes the default one for the file. */
        mutex_lock(&uverbs_file->ucontext_lock);
        if (!uverbs_file->default_async_file) {
                /* Pairs with the put in ib_uverbs_release_file */
                uverbs_uobject_get(&async_file->uobj);
                smp_store_release(&uverbs_file->default_async_file, async_file);
        }
        mutex_unlock(&uverbs_file->ucontext_lock);

        INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
                              ib_uverbs_event_handler);
        ib_register_event_handler(&async_file->event_handler);
}
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
                          struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
                          const struct uverbs_api_write_method *method_elm)
{
        if (method_elm->is_ex) {
                count -= sizeof(*hdr) + sizeof(*ex_hdr);

                if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
                        return -EINVAL;

                if (hdr->in_words * 8 < method_elm->req_size)
                        return -ENOSPC;

                if (ex_hdr->cmd_hdr_reserved)
                        return -EINVAL;

                if (ex_hdr->response) {
                        if (!hdr->out_words && !ex_hdr->provider_out_words)
                                return -EINVAL;

                        if (hdr->out_words * 8 < method_elm->resp_size)
                                return -ENOSPC;

                        if (!access_ok(u64_to_user_ptr(ex_hdr->response),
                                       (hdr->out_words + ex_hdr->provider_out_words) * 8))
                                return -EFAULT;
                } else {
                        if (hdr->out_words || ex_hdr->provider_out_words)
                                return -EINVAL;
                }

                return 0;
        }

        /* not extended command */
        if (hdr->in_words * 4 != count)
                return -EINVAL;

        if (count < method_elm->req_size + sizeof(hdr)) {
                /*
                 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
                 * with a 16 byte write instead of 24. Old kernels didn't
                 * check the size so they allowed this. Now that the size is
                 * checked provide a compatibility work around to not break
                 * those userspaces.
                 */
                if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
                    count == 16) {
                        hdr->in_words = 6;
                        return 0;
                }
                return -ENOSPC;
        }

        if (hdr->out_words * 4 < method_elm->resp_size)
                return -ENOSPC;

        return 0;
}
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        const struct uverbs_api_write_method *method_elm;
        struct uverbs_api *uapi = file->device->uapi;
        struct ib_uverbs_ex_cmd_hdr ex_hdr;
        struct ib_uverbs_cmd_hdr hdr;
        struct uverbs_attr_bundle bundle;
        int srcu_key;
        ssize_t ret;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (count < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        method_elm = uapi_get_method(uapi, hdr.command);
        if (IS_ERR(method_elm))
                return PTR_ERR(method_elm);

        if (method_elm->is_ex) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
                        return -EFAULT;
        }

        ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
        if (ret)
                return ret;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

        buf += sizeof(hdr);

        memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
        bundle.ufile = file;
        bundle.context = NULL; /* only valid if bundle has uobject */
        bundle.uobject = NULL;
        if (!method_elm->is_ex) {
                size_t in_len = hdr.in_words * 4 - sizeof(hdr);
                size_t out_len = hdr.out_words * 4;
                u64 response = 0;

                if (method_elm->has_udata) {
                        bundle.driver_udata.inlen =
                                in_len - method_elm->req_size;
                        in_len = method_elm->req_size;
                        if (bundle.driver_udata.inlen)
                                bundle.driver_udata.inbuf = buf + in_len;
                        else
                                bundle.driver_udata.inbuf = NULL;
                } else {
                        memset(&bundle.driver_udata, 0,
                               sizeof(bundle.driver_udata));
                }

                if (method_elm->has_resp) {
                        /*
                         * The macros check that if has_resp is set
                         * then the command request structure starts
                         * with a '__aligned u64 response' member.
                         */
                        ret = get_user(response, (const u64 __user *)buf);
                        if (ret)
                                goto out_unlock;

                        if (method_elm->has_udata) {
                                bundle.driver_udata.outlen =
                                        out_len - method_elm->resp_size;
                                out_len = method_elm->resp_size;
                                if (bundle.driver_udata.outlen)
                                        bundle.driver_udata.outbuf =
                                                u64_to_user_ptr(response +
                                                                out_len);
                                else
                                        bundle.driver_udata.outbuf = NULL;
                        }
                } else {
                        bundle.driver_udata.outlen = 0;
                        bundle.driver_udata.outbuf = NULL;
                }

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.ucore, buf, u64_to_user_ptr(response),
                        in_len, out_len);
        } else {
                buf += sizeof(ex_hdr);

                ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
                                        u64_to_user_ptr(ex_hdr.response),
                                        hdr.in_words * 8, hdr.out_words * 8);

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.driver_udata, buf + bundle.ucore.inlen,
                        u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
                        ex_hdr.provider_in_words * 8,
                        ex_hdr.provider_out_words * 8);
        }

        ret = method_elm->handler(&bundle);
        if (bundle.uobject)
                uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true,
                                       !ret, &bundle);
out_unlock:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return (ret) ? : count;
}
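/*
 * Note the "(ret) ?: count" idiom above: write(2) is all-or-nothing for
 * uverbs commands, so a successful handler always reports the full
 * request length as consumed.
 */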
static const struct vm_operations_struct rdma_umap_ops;

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_ucontext *ucontext;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ucontext = ib_uverbs_get_ucontext_file(file);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto out;
        }
        vma->vm_ops = &rdma_umap_ops;
        ret = ucontext->device->ops.mmap(ucontext, vma);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}
/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *opriv = vma->vm_private_data;
        struct rdma_umap_priv *priv;

        if (!opriv)
                return;

        /* We are racing with disassociation */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                goto out_zap;
        /*
         * Disassociation already completed, the VMA should already be zapped.
         */
        if (!ufile->ucontext)
                goto out_unlock;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_unlock;
        rdma_umap_priv_init(priv, vma, opriv->entry);

        up_read(&ufile->hw_destroy_rwsem);
        return;

out_unlock:
        up_read(&ufile->hw_destroy_rwsem);
out_zap:
        /*
         * We can't allow the VMA to be created with the actual IO pages, that
         * would break our API contract, and it can't be stopped at this
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
static void rdma_umap_close(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vma->vm_private_data;

        if (!priv)
                return;

        /*
         * The vma holds a reference on the struct file that created it, which
         * in turn means that the ib_uverbs_file is guaranteed to exist at
         * this point.
         */
        mutex_lock(&ufile->umap_lock);
        if (priv->entry)
                rdma_user_mmap_entry_put(priv->entry);

        list_del(&priv->list);
        mutex_unlock(&ufile->umap_lock);
        kfree(priv);
}
/*
 * Once the zap_vma_ptes has been called touches to the VMA will come here and
 * we return a dummy writable zero page for all the pfns.
 */
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
{
        struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
        vm_fault_t ret = 0;

        if (!priv)
                return VM_FAULT_SIGBUS;

        /* Read only pages can just use the system zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        mutex_lock(&ufile->umap_lock);
        if (!ufile->disassociate_page)
                ufile->disassociate_page =
                        alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

        if (ufile->disassociate_page) {
                /*
                 * This VMA is forced to always be shared so this doesn't have
                 * to worry about COW.
                 */
                vmf->page = ufile->disassociate_page;
                get_page(vmf->page);
        } else {
                ret = VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ufile->umap_lock);

        return ret;
}
static const struct vm_operations_struct rdma_umap_ops = {
        .open  = rdma_umap_open,
        .close = rdma_umap_close,
        .fault = rdma_umap_fault,
};
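/*
 * Taken together these vm_ops implement the disassociation contract: once
 * uverbs_user_mmap_disassociate() zaps a VMA's PTEs, later faults land in
 * rdma_umap_fault() and are satisfied with zero pages instead of the
 * (now revoked) device IO pages.
 */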
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
        struct rdma_umap_priv *priv, *next_priv;

        lockdep_assert_held(&ufile->hw_destroy_rwsem);

        while (1) {
                struct mm_struct *mm = NULL;

                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
                while (!list_empty(&ufile->umaps)) {
                        int ret;

                        priv = list_first_entry(&ufile->umaps,
                                                struct rdma_umap_priv, list);
                        mm = priv->vma->vm_mm;
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
                                if (priv->entry) {
                                        rdma_user_mmap_entry_put(priv->entry);
                                        priv->entry = NULL;
                                }
                                mm = NULL;
                                continue;
                        }
                        break;
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm)
                        return;

                /*
                 * The umap_lock is nested under mmap_lock since it used within
                 * the vma_ops callbacks, so we have to clean the list one mm
                 * at a time to get the lock ordering right. Typically there
                 * will only be one mm, so no big deal.
                 */
                mmap_read_lock(mm);
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                        struct vm_area_struct *vma = priv->vma;

                        if (vma->vm_mm != mm)
                                continue;
                        list_del_init(&priv->list);

                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);

                        if (priv->entry) {
                                rdma_user_mmap_entry_put(priv->entry);
                                priv->entry = NULL;
                        }
                }
                mutex_unlock(&ufile->umap_lock);
                mmap_read_unlock(mm);
                mmput(mm);
        }
}
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately run -ENXIO, or all the
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!atomic_inc_not_zero(&dev->refcount))
                return -ENXIO;

        get_device(&dev->dev);
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto err;
        }

        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
        module_dependent = !(ib_dev->ops.disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->ops.owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device = dev;
        kref_init(&file->ref);
        mutex_init(&file->ucontext_lock);

        spin_lock_init(&file->uobjects_lock);
        INIT_LIST_HEAD(&file->uobjects);
        init_rwsem(&file->hw_destroy_rwsem);
        mutex_init(&file->umap_lock);
        INIT_LIST_HEAD(&file->umaps);

        filp->private_data = file;
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        setup_ufile_idr_uobject(file);

        return stream_open(inode, filp);

err_module:
        module_put(ib_dev->ops.owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (atomic_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        put_device(&dev->dev);
        return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

        mutex_lock(&file->device->lists_mutex);
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}
static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
};
static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
};
static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
                                 struct ib_client_nl_info *res)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int ret;

        if (res->port != -1)
                return -EINVAL;

        res->abi = ibdev->ops.uverbs_abi_ver;
        res->cdev = &uverbs_dev->dev;

        /*
         * To support DRIVER_ID binding in userspace some of the driver need
         * upgrading to expose their PCI dependent revision information
         * through get_context instead of relying on modalias matching. When
         * the drivers are fixed they can drop this flag.
         */
        if (!ibdev->ops.uverbs_no_driver_id_binding) {
                ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
                                  ibdev->ops.driver_id);
                if (ret)
                        return ret;
        }
        return 0;
}
static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .no_kverbs_req = true,
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one,
        .get_nl_info = ib_uverbs_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("uverbs");
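/*
 * The "uverbs" alias lets the rdma netlink layer demand-load this module
 * when userspace asks for the client by name; no_kverbs_req indicates the
 * client can attach even to devices that lack kernel-verbs support.
 */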
static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sysfs_emit(buf, "%s\n", dev_name(&ib_dev->dev));
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(ibdev);
static ssize_t abi_version_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sysfs_emit(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(abi_version);
static struct attribute *ib_dev_attrs[] = {
        &dev_attr_abi_version.attr,
        &dev_attr_ibdev.attr,
        NULL,
};

static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));
static int ib_uverbs_create_uapi(struct ib_device *device,
                                 struct ib_uverbs_device *uverbs_dev)
{
        struct uverbs_api *uapi;

        uapi = uverbs_alloc_api(device);
        if (IS_ERR(uapi))
                return PTR_ERR(uapi);

        uverbs_dev->uapi = uapi;
        return 0;
}
static int ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->ops.alloc_ucontext)
                return -EOPNOTSUPP;

        uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
        if (!uverbs_dev)
                return -ENOMEM;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return -ENOMEM;
        }

        device_initialize(&uverbs_dev->dev);
        uverbs_dev->dev.class = uverbs_class;
        uverbs_dev->dev.parent = device->dev.parent;
        uverbs_dev->dev.release = ib_uverbs_release_dev;
        uverbs_dev->groups[0] = &dev_attr_group;
        uverbs_dev->dev.groups = uverbs_dev->groups;
        atomic_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
                               GFP_KERNEL);
        if (devnum < 0) {
                ret = -ENOMEM;
                goto err;
        }
        uverbs_dev->devnum = devnum;
        if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
                base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
        else
                base = IB_UVERBS_BASE_DEV + devnum;

        ret = ib_uverbs_create_uapi(device, uverbs_dev);
        if (ret)
                goto err_uapi;

        uverbs_dev->dev.devt = base;
        dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

        cdev_init(&uverbs_dev->cdev,
                  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
        uverbs_dev->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
        if (ret)
                goto err_uapi;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);
        return 0;

err_uapi:
        ida_free(&uverbs_ida, devnum);
err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        put_device(&uverbs_dev->dev);
        return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;

        /* Pending running commands to terminate */
        uverbs_disassociate_api_pre(uverbs_dev);

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                list_del_init(&file->list);
                kref_get(&file->ref);

                /* We must release the mutex before going ahead and calling
                 * uverbs_cleanup_ufile, as it might end up indirectly calling
                 * uverbs_close, for example due to freeing the resources (e.g
                 * mmput).
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);

                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);

                mutex_lock(&uverbs_dev->lists_mutex);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);

        uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
        ida_free(&uverbs_ida, uverbs_dev->devnum);

        if (device->ops.disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see a EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);

        put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
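/*
 * With this devnode callback the char devices surface as
 * /dev/infiniband/uverbsN (e.g. /dev/infiniband/uverbs0), which is the
 * path rdma-core opens.
 */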
static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
                                     IB_UVERBS_NUM_FIXED_MINOR,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
                                  IB_UVERBS_NUM_DYNAMIC_MINOR,
                                  "infiniband_verbs");
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);

out:
        return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);
        mmu_notifier_synchronize();
}
module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);