/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
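/*
 * Each uverbs char device is assigned a minor number.  A small block of 32
 * fixed minors is carved out of the static major (231) starting at minor
 * 192; once those are exhausted, further devices fall back to a dynamically
 * allocated chrdev region (see ib_uverbs_init() below).
 */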
enum {
        IB_UVERBS_MAJOR             = 231,
        IB_UVERBS_BASE_MINOR        = 192,
        IB_UVERBS_MAX_DEVICES       = RDMA_MAX_PORTS,
        IB_UVERBS_NUM_FIXED_MINOR   = 32,
        IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
                                      IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
        /*
         * We do not hold the hw_destroy_rwsem lock for this flow, instead
         * srcu is used. It does not matter if someone races this with
         * get_context, we get NULL or valid ucontext.
         */
        struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

        if (!srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu))
                return ERR_PTR(-EIO);

        if (!ucontext)
                return ERR_PTR(-EINVAL);

        return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);
int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->ops.dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);
        return ret;
}
static void ib_uverbs_release_dev(struct device *device)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);

        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
        mutex_destroy(&dev->lists_mutex);
        mutex_destroy(&dev->xrcd_tree_mutex);
        kfree(dev);
}
void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->ev_queue.lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->ev_queue.lock);

                uverbs_uobject_put(&ev_file->uobj);
        }

        ib_uverbs_release_uevent(&uobj->uevent);
}
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
        struct ib_uverbs_async_event_file *async_file =
                READ_ONCE(uobj->uobject.ufile->async_file);
        struct ib_uverbs_event *evt, *tmp;

        if (!async_file)
                return;

        spin_lock_irq(&async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&async_file->ev_queue.lock);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
                             struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}
void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        release_ufile_idr_uobject(file);

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->ops.disassociate_ucontext)
                module_put(ib_dev->ops.owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        if (file->async_file)
                uverbs_uobject_put(&file->async_file->uobj);
        put_device(&file->device->dev);

        if (file->disassociate_page)
                __free_pages(file->disassociate_page, 0);
        mutex_destroy(&file->umap_lock);
        mutex_destroy(&file->ucontext_lock);
        kfree(file);
}
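/*
 * Common read path for the completion-channel and async-event file
 * descriptors: block (unless O_NONBLOCK) until an event is queued, copy one
 * fixed-size event descriptor to userspace, and return -EIO once the queue
 * has been closed by device disassociation.
 */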
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
                                    struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos,
                                    size_t eventsz)
{
        struct ib_uverbs_event *event;
        int ret = 0;

        spin_lock_irq(&ev_queue->lock);

        while (list_empty(&ev_queue->event_list)) {
                spin_unlock_irq(&ev_queue->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(ev_queue->poll_wait,
                                             (!list_empty(&ev_queue->event_list) ||
                                              ev_queue->is_closed)))
                        return -ERESTARTSYS;

                spin_lock_irq(&ev_queue->lock);
        }

        /* If device was disassociated and no event exists set an error */
        if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
                spin_unlock_irq(&ev_queue->lock);
                return -EIO;
        }

        event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(ev_queue->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&ev_queue->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}
static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
                                          size_t count, loff_t *pos)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
                                    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
                                         size_t count, loff_t *pos)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
                                    pos, sizeof(struct ib_uverbs_comp_event_desc));
}
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
                                     struct file *filp,
                                     struct poll_table_struct *wait)
{
        __poll_t pollflags = 0;

        poll_wait(filp, &ev_queue->poll_wait, wait);

        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
                pollflags = EPOLLIN | EPOLLRDNORM;
        else if (ev_queue->is_closed)
                pollflags = EPOLLERR;
        spin_unlock_irq(&ev_queue->lock);

        return pollflags;
}
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
                                           struct poll_table_struct *wait)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
                                          struct poll_table_struct *wait)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}
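/*
 * File operations behind the fds handed to userspace: uverbs_event_fops backs
 * completion channels, uverbs_async_event_fops backs the per-context async
 * event file.
 */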
const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_comp_event_read,
        .poll    = ib_uverbs_comp_event_poll,
        .release = uverbs_uobject_fd_release,
        .fasync  = ib_uverbs_comp_event_fasync,
};

const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
        .release = uverbs_async_event_release,
        .fasync  = ib_uverbs_async_event_fasync,
};
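/*
 * ib_uverbs_comp_handler() runs from the CQ completion callback, potentially
 * in interrupt context, so the event entry is allocated with GFP_ATOMIC and
 * the queue lock is taken with irqsave.
 */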
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_queue *ev_queue = cq_context;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!ev_queue)
                return;

        spin_lock_irqsave(&ev_queue->lock, flags);
        if (ev_queue->is_closed) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        uobj = cq->uobject;

        entry->desc.comp.cq_handle = cq->uobject->uevent.uobject.user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &ev_queue->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&ev_queue->lock, flags);

        wake_up_interruptible(&ev_queue->poll_wait);
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}
void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
                             __u64 element, __u64 event,
                             struct list_head *obj_list, u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!async_file)
                return;

        spin_lock_irqsave(&async_file->ev_queue.lock, flags);
        if (async_file->ev_queue.is_closed) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved   = 0;
        entry->counter               = counter;

        list_add_tail(&entry->list, &async_file->ev_queue.event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&async_file->ev_queue.lock, flags);

        wake_up_interruptible(&async_file->ev_queue.poll_wait);
        kill_fasync(&async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
                              struct ib_event *event)
{
        ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
                                eobj->uobject.user_handle, event->event,
                                &eobj->event_list, &eobj->events_reported);
}
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.cq->uobject->uevent, event);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        /* for XRC target qp's, check that qp is live */
        if (!event->element.qp->uobject)
                return;

        uverbs_uobj_event(&event->element.qp->uobject->uevent, event);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.wq->uobject->uevent, event);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        uverbs_uobj_event(&event->element.srq->uobject->uevent, event);
}

static void ib_uverbs_event_handler(struct ib_event_handler *handler,
                                    struct ib_event *event)
{
        ib_uverbs_async_handler(
                container_of(handler, struct ib_uverbs_async_event_file,
                             event_handler),
                event->element.port_num, event->event, NULL, NULL);
}
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
        spin_lock_init(&ev_queue->lock);
        INIT_LIST_HEAD(&ev_queue->event_list);
        init_waitqueue_head(&ev_queue->poll_wait);
        ev_queue->is_closed   = 0;
        ev_queue->async_queue = NULL;
}
void ib_uverbs_init_async_event_file(
        struct ib_uverbs_async_event_file *async_file)
{
        struct ib_uverbs_file *uverbs_file = async_file->uobj.ufile;
        struct ib_device *ib_dev = async_file->uobj.context->device;

        ib_uverbs_init_event_queue(&async_file->ev_queue);

        /* The first async_event_file becomes the default one for the file. */
        mutex_lock(&uverbs_file->ucontext_lock);
        if (!uverbs_file->async_file) {
                /* Pairs with the put in ib_uverbs_release_file */
                uverbs_uobject_get(&async_file->uobj);
                smp_store_release(&uverbs_file->async_file, async_file);
        }
        mutex_unlock(&uverbs_file->ucontext_lock);

        INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,
                              ib_uverbs_event_handler);
        ib_register_event_handler(&async_file->event_handler);
}
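/*
 * Legacy write() commands carry a fixed header.  For non-extended commands
 * in_words/out_words count 4-byte units and include the header itself; for
 * extended commands they count 8-byte units of payload only, with the
 * driver-specific portion described separately by provider_in_words and
 * provider_out_words.  verify_hdr() enforces these size rules before a
 * command is dispatched.
 */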
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
                          struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
                          const struct uverbs_api_write_method *method_elm)
{
        if (method_elm->is_ex) {
                count -= sizeof(*hdr) + sizeof(*ex_hdr);

                if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
                        return -EINVAL;

                if (hdr->in_words * 8 < method_elm->req_size)
                        return -ENOSPC;

                if (ex_hdr->cmd_hdr_reserved)
                        return -EINVAL;

                if (ex_hdr->response) {
                        if (!hdr->out_words && !ex_hdr->provider_out_words)
                                return -EINVAL;

                        if (hdr->out_words * 8 < method_elm->resp_size)
                                return -ENOSPC;

                        if (!access_ok(u64_to_user_ptr(ex_hdr->response),
                                       (hdr->out_words + ex_hdr->provider_out_words) * 8))
                                return -EFAULT;
                } else {
                        if (hdr->out_words || ex_hdr->provider_out_words)
                                return -EINVAL;
                }

                return 0;
        }

        /* not extended command */
        if (hdr->in_words * 4 != count)
                return -EINVAL;

        if (count < method_elm->req_size + sizeof(hdr)) {
                /*
                 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
                 * with a 16 byte write instead of 24. Old kernels didn't
                 * check the size so they allowed this. Now that the size is
                 * checked provide a compatibility work around to not break
                 * those userspaces.
                 */
                if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
                    count == 16) {
                        hdr->in_words = 6;
                        return 0;
                }
                return -ENOSPC;
        }

        if (hdr->out_words * 4 < method_elm->resp_size)
                return -ENOSPC;

        return 0;
}
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        const struct uverbs_api_write_method *method_elm;
        struct uverbs_api *uapi = file->device->uapi;
        struct ib_uverbs_ex_cmd_hdr ex_hdr;
        struct ib_uverbs_cmd_hdr hdr;
        struct uverbs_attr_bundle bundle;
        int srcu_key;
        ssize_t ret;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (count < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        method_elm = uapi_get_method(uapi, hdr.command);
        if (IS_ERR(method_elm))
                return PTR_ERR(method_elm);

        if (method_elm->is_ex) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
                        return -EFAULT;
        }

        ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
        if (ret)
                return ret;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

        buf += sizeof(hdr);

        memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
        bundle.ufile = file;
        bundle.context = NULL; /* only valid if bundle has uobject */
        if (!method_elm->is_ex) {
                size_t in_len = hdr.in_words * 4 - sizeof(hdr);
                size_t out_len = hdr.out_words * 4;
                u64 response = 0;

                if (method_elm->has_udata) {
                        bundle.driver_udata.inlen =
                                in_len - method_elm->req_size;
                        in_len = method_elm->req_size;
                        if (bundle.driver_udata.inlen)
                                bundle.driver_udata.inbuf = buf + in_len;
                        else
                                bundle.driver_udata.inbuf = NULL;
                } else {
                        memset(&bundle.driver_udata, 0,
                               sizeof(bundle.driver_udata));
                }

                if (method_elm->has_resp) {
                        /*
                         * The macros check that if has_resp is set
                         * then the command request structure starts
                         * with a '__aligned u64 response' member.
                         */
                        ret = get_user(response, (const u64 __user *)buf);
                        if (ret)
                                goto out_unlock;

                        if (method_elm->has_udata) {
                                bundle.driver_udata.outlen =
                                        out_len - method_elm->resp_size;
                                out_len = method_elm->resp_size;
                                if (bundle.driver_udata.outlen)
                                        bundle.driver_udata.outbuf =
                                                u64_to_user_ptr(response +
                                                                out_len);
                                else
                                        bundle.driver_udata.outbuf = NULL;
                        }
                } else {
                        bundle.driver_udata.outlen = 0;
                        bundle.driver_udata.outbuf = NULL;
                }

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.ucore, buf, u64_to_user_ptr(response),
                        in_len, out_len);
        } else {
                buf += sizeof(ex_hdr);

                ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
                                        u64_to_user_ptr(ex_hdr.response),
                                        hdr.in_words * 8, hdr.out_words * 8);

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.driver_udata, buf + bundle.ucore.inlen,
                        u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
                        ex_hdr.provider_in_words * 8,
                        ex_hdr.provider_out_words * 8);
        }

        ret = method_elm->handler(&bundle);
out_unlock:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return (ret) ? : count;
}
static const struct vm_operations_struct rdma_umap_ops;
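/*
 * mmap support: every VMA created through a driver's mmap handler is given
 * rdma_umap_ops so that uverbs can track it on ufile->umaps and zap the PTEs
 * if the underlying device is hot-unplugged (disassociated).
 */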
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_ucontext *ucontext;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ucontext = ib_uverbs_get_ucontext_file(file);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto out;
        }
        vma->vm_ops = &rdma_umap_ops;
        ret = ucontext->device->ops.mmap(ucontext, vma);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}
/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct.
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *opriv = vma->vm_private_data;
        struct rdma_umap_priv *priv;

        if (!opriv)
                return;

        /* We are racing with disassociation */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                goto out_zap;
        /*
         * Disassociation already completed, the VMA should already be zapped.
         */
        if (!ufile->ucontext)
                goto out_unlock;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_unlock;
        rdma_umap_priv_init(priv, vma, opriv->entry);

        up_read(&ufile->hw_destroy_rwsem);
        return;

out_unlock:
        up_read(&ufile->hw_destroy_rwsem);
out_zap:
        /*
         * We can't allow the VMA to be created with the actual IO pages, that
         * would break our API contract, and it can't be stopped at this
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
static void rdma_umap_close(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vma->vm_private_data;

        if (!priv)
                return;

        /*
         * The vma holds a reference on the struct file that created it, which
         * in turn means that the ib_uverbs_file is guaranteed to exist at
         * this point.
         */
        mutex_lock(&ufile->umap_lock);
        if (priv->entry)
                rdma_user_mmap_entry_put(priv->entry);

        list_del(&priv->list);
        mutex_unlock(&ufile->umap_lock);
        kfree(priv);
}
/*
 * Once zap_vma_ptes() has been called, touches to the VMA will come here and
 * we return a dummy writable zero page for all the pfns.
 */
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
{
        struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
        vm_fault_t ret = 0;

        if (!priv)
                return VM_FAULT_SIGBUS;

        /* Read only pages can just use the system zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        mutex_lock(&ufile->umap_lock);
        if (!ufile->disassociate_page)
                ufile->disassociate_page =
                        alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

        if (ufile->disassociate_page) {
                /*
                 * This VMA is forced to always be shared so this doesn't have
                 * to worry about COW.
                 */
                vmf->page = ufile->disassociate_page;
                get_page(vmf->page);
        } else {
                ret = VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ufile->umap_lock);

        return ret;
}
static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
        .fault = rdma_umap_fault,
};
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
        struct rdma_umap_priv *priv, *next_priv;

        lockdep_assert_held(&ufile->hw_destroy_rwsem);

        while (1) {
                struct mm_struct *mm = NULL;

                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
                while (!list_empty(&ufile->umaps)) {
                        int ret;

                        priv = list_first_entry(&ufile->umaps,
                                                struct rdma_umap_priv, list);
                        mm = priv->vma->vm_mm;
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
                                if (priv->entry) {
                                        rdma_user_mmap_entry_put(priv->entry);
                                        priv->entry = NULL;
                                }
                                mm = NULL;
                                continue;
                        }
                        break;
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm)
                        return;

                /*
                 * The umap_lock is nested under mmap_sem since it is used
                 * within the vma_ops callbacks, so we have to clean the list
                 * one mm at a time to get the lock ordering right. Typically
                 * there will only be one mm, so no big deal.
                 */
                down_read(&mm->mmap_sem);
                if (!mmget_still_valid(mm))
                        goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                        struct vm_area_struct *vma = priv->vma;

                        if (vma->vm_mm != mm)
                                continue;
                        list_del_init(&priv->list);

                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);

                        if (priv->entry) {
                                rdma_user_mmap_entry_put(priv->entry);
                                priv->entry = NULL;
                        }
                }
                mutex_unlock(&ufile->umap_lock);
        skip_mm:
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}
/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately run -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!atomic_inc_not_zero(&dev->refcount))
                return -ENXIO;

        get_device(&dev->dev);
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto err;
        }

        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
        module_dependent = !(ib_dev->ops.disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->ops.owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device = dev;
        kref_init(&file->ref);
        mutex_init(&file->ucontext_lock);

        spin_lock_init(&file->uobjects_lock);
        INIT_LIST_HEAD(&file->uobjects);
        init_rwsem(&file->hw_destroy_rwsem);
        mutex_init(&file->umap_lock);
        INIT_LIST_HEAD(&file->umaps);

        filp->private_data = file;
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        setup_ufile_idr_uobject(file);

        return stream_open(inode, filp);

err_module:
        module_put(ib_dev->ops.owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (atomic_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        put_device(&dev->dev);
        return ret;
}
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

        mutex_lock(&file->device->lists_mutex);
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}
static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
};
static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
                                 struct ib_client_nl_info *res)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int ret;

        if (res->port != -1)
                return -EINVAL;

        res->abi = ibdev->ops.uverbs_abi_ver;
        res->cdev = &uverbs_dev->dev;

        /*
         * To support DRIVER_ID binding in userspace some of the drivers need
         * upgrading to expose their PCI dependent revision information
         * through get_context instead of relying on modalias matching. When
         * the drivers are fixed they can drop this flag.
         */
        if (!ibdev->ops.uverbs_no_driver_id_binding) {
                ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
                                  ibdev->ops.driver_id);
                if (ret)
                        return ret;
        }
        return 0;
}
static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .no_kverbs_req = true,
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one,
        .get_nl_info = ib_uverbs_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("uverbs");
static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(ibdev);
static ssize_t abi_version_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(abi_version);
static struct attribute *ib_dev_attrs[] = {
        &dev_attr_abi_version.attr,
        &dev_attr_ibdev.attr,
        NULL,
};

static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));
static int ib_uverbs_create_uapi(struct ib_device *device,
                                 struct ib_uverbs_device *uverbs_dev)
{
        struct uverbs_api *uapi;

        uapi = uverbs_alloc_api(device);
        if (IS_ERR(uapi))
                return PTR_ERR(uapi);

        uverbs_dev->uapi = uapi;
        return 0;
}
static void ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->ops.alloc_ucontext)
                return;

        uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
        if (!uverbs_dev)
                return;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return;
        }

        device_initialize(&uverbs_dev->dev);
        uverbs_dev->dev.class = uverbs_class;
        uverbs_dev->dev.parent = device->dev.parent;
        uverbs_dev->dev.release = ib_uverbs_release_dev;
        uverbs_dev->groups[0] = &dev_attr_group;
        uverbs_dev->dev.groups = uverbs_dev->groups;
        atomic_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
                               GFP_KERNEL);
        if (devnum < 0)
                goto err;
        uverbs_dev->devnum = devnum;
        if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
                base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
        else
                base = IB_UVERBS_BASE_DEV + devnum;

        if (ib_uverbs_create_uapi(device, uverbs_dev))
                goto err_uapi;

        uverbs_dev->dev.devt = base;
        dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

        cdev_init(&uverbs_dev->cdev,
                  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
        uverbs_dev->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
        if (ret)
                goto err_uapi;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);
        return;

err_uapi:
        ida_free(&uverbs_ida, devnum);
err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        put_device(&uverbs_dev->dev);
        return;
}
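/*
 * Called on device removal when the driver implements disassociate_ucontext:
 * tear down all HW resources behind the open files so the ib_device can go
 * away immediately, while the uverbs char device stays around until every
 * file is closed.
 */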
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;

        /* Pending running commands to terminate */
        uverbs_disassociate_api_pre(uverbs_dev);

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                list_del_init(&file->list);
                kref_get(&file->ref);

                /* We must release the mutex before going ahead and calling
                 * uverbs_cleanup_ufile, as it might end up indirectly calling
                 * uverbs_close, for example due to freeing the resources (e.g
                 * mmput).
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);

                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);

                mutex_lock(&uverbs_dev->lists_mutex);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);

        uverbs_disassociate_api(uverbs_dev->uapi);
}
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
        ida_free(&uverbs_ida, uverbs_dev->devnum);

        if (device->ops.disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see a EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);

        put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
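/*
 * Module init: reserve the fixed minors under major 231, allocate a dynamic
 * chrdev region for the remaining devices, create the "infiniband_verbs"
 * class and register the uverbs IB client; everything is unwound in reverse
 * order on failure and in ib_uverbs_cleanup().
 */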
static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
                                     IB_UVERBS_NUM_FIXED_MINOR,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
                                  IB_UVERBS_NUM_DYNAMIC_MINOR,
                                  "infiniband_verbs");
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);

out:
        return ret;
}
static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);
        mmu_notifier_synchronize();
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);