/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
int uverbs_ns_idx(u16 *id, unsigned int ns_count)
{
	int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;

	if (ret >= ns_count)
		return -EINVAL;

	*id &= ~UVERBS_ID_NS_MASK;
	return ret;
}
const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
						   uint16_t object)
{
	const struct uverbs_root_spec *object_hash = ibdev->specs_root;
	const struct uverbs_object_spec_hash *objects;
	int ret = uverbs_ns_idx(&object, object_hash->num_buckets);

	if (ret < 0)
		return NULL;

	objects = object_hash->object_buckets[ret];

	if (object >= objects->num_objects)
		return NULL;

	return objects->objects[object];
}
const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
						   uint16_t method)
{
	const struct uverbs_method_spec_hash *methods;
	int ret = uverbs_ns_idx(&method, object->num_buckets);

	if (ret < 0)
		return NULL;

	methods = object->method_buckets[ret];
	if (method >= methods->num_methods)
		return NULL;

	return methods->methods[method];
}
void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
	struct ib_uobject *uobj =
		container_of(ref, struct ib_uobject, ref);

	if (uobj->type->type_class->needs_kfree_rcu)
		kfree_rcu(uobj, rcu);
	else
		kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}
static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{
	/*
	 * When a shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments it.
	 * Exclusive access is required for operations like write or destroy.
	 * In exclusive access mode, we check that the counter is zero (nobody
	 * claimed this object) and we set it to -1. Releasing a shared access
	 * lock is done simply by decreasing the counter. As for exclusive
	 * access locks, since only a single one of them is allowed
	 * concurrently, setting the counter to zero is enough for releasing
	 * this lock.
	 */
	if (!exclusive)
		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;

	/* lock is either WRITE or DESTROY - should be exclusive */
	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}
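
/*
 * Worked example of the usecnt states (an illustrative restatement of the
 * logic above, not additional semantics):
 *
 *	usecnt == 0	unlocked; a shared or an exclusive lock may be taken
 *	usecnt == N > 0	N shared holders; another shared lock moves it to
 *			N + 1, an exclusive attempt fails with -EBUSY
 *	usecnt == -1	exclusively locked; any further attempt fails with
 *			-EBUSY
 *
 * Release mirrors acquire: a shared holder does atomic_dec(), the exclusive
 * holder does atomic_set(..., 0). See rdma_lookup_put_uobject().
 */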
static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
				     const struct uverbs_obj_type *type)
{
	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);

	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler. The object is added
	 * to the list in the commit stage.
	 */
	uobj->context = context;
	uobj->type = type;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}
static int idr_add_uobj(struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&uobj->context->ufile->idr_lock);

	/*
	 * We start with allocating an idr pointing to NULL. This represents an
	 * object which isn't initialized yet. We'll replace it later on with
	 * the real object once we commit.
	 */
	ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&uobj->context->ufile->idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
/*
 * It only removes the uobject from the IDR; uverbs_uobject_put() is still
 * required.
 */
static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
{
	spin_lock(&uobj->context->ufile->idr_lock);
	idr_remove(&uobj->context->ufile->idr, uobj->id);
	spin_unlock(&uobj->context->ufile->idr_lock);
}
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext,
						 int id, bool exclusive)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	/* object won't be released as we're protected in rcu */
	uobj = idr_find(&ucontext->ufile->idr, id);
	if (!uobj) {
		uobj = ERR_PTR(-ENOENT);
		goto free;
	}

	/*
	 * The idr_find is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after idr_remove goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);

free:
	rcu_read_unlock();
	return uobj;
}
static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
						struct ib_ucontext *ucontext,
						int id, bool exclusive)
{
	struct file *f;
	struct ib_uobject *uobject;
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);

	if (exclusive)
		return ERR_PTR(-EOPNOTSUPP);

	f = fget(id);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running uverbs_close_fd,
	 * and the caller is expected to ensure that uverbs_close_fd is never
	 * done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
					   struct ib_ucontext *ucontext,
					   int id, bool exclusive)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
	if (IS_ERR(uobj))
		return uobj;

	if (uobj->type != type) {
		ret = -EINVAL;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, exclusive);
	if (ret) {
		WARN(ucontext->cleanup_reason,
		     "ib_uverbs: Trying to lookup_get while cleanup context\n");
		goto free;
	}

	return uobj;
free:
	uobj->type->type_class->lookup_put(uobj, exclusive);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}
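
/*
 * Usage sketch (illustrative only; "some_type", "ucontext" and "id" stand
 * in for a caller's real values): a read-only handler is expected to pair
 * this lookup with rdma_lookup_put_uobject() using the same exclusivity:
 *
 *	struct ib_uobject *uobj;
 *
 *	uobj = rdma_lookup_get_uobject(some_type, ucontext, id, false);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	... access uobj->object under the shared lock ...
 *	rdma_lookup_put_uobject(uobj, false);
 */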
static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
						  struct ib_ucontext *ucontext)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto idr_remove;

	return uobj;

idr_remove:
	uverbs_idr_remove_uobj(uobj);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}
static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct ib_uobject_file *uobj_file;
	struct file *filp;

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj)) {
		put_unused_fd(new_fd);
		return uobj;
	}

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	filp = anon_inode_getfile(fd_type->name,
				  fd_type->fops,
				  uobj_file,
				  fd_type->flags);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		uverbs_uobject_put(uobj);
		return (void *)filp;
	}

	uobj_file->uobj.id = new_fd;
	uobj_file->uobj.object = filp;
	uobj_file->ufile = ucontext->ufile;
	INIT_LIST_HEAD(&uobj->list);
	kref_get(&uobj_file->ufile->ref);

	return uobj;
}
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
					    struct ib_ucontext *ucontext)
{
	return type->type_class->alloc_begin(type, ucontext);
}
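
/*
 * Creation lifecycle sketch (illustrative only): allocation is a two-phase
 * commit. The handler begins, fills uobj->object with the driver object
 * ("my_create_obj" below is hypothetical), then commits on success or
 * aborts on failure so a half-built handle never becomes visible:
 *
 *	uobj = rdma_alloc_begin_uobject(type, ucontext);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	uobj->object = my_create_obj(...);
 *	if (IS_ERR(uobj->object)) {
 *		int ret = PTR_ERR(uobj->object);
 *
 *		rdma_alloc_abort_uobject(uobj);
 *		return ret;
 *	}
 *	return rdma_alloc_commit_uobject(uobj);
 */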
static void uverbs_uobject_add(struct ib_uobject *uobject)
{
	mutex_lock(&uobject->context->uobjects_lock);
	list_add(&uobject->list, &uobject->context->uobjects);
	mutex_unlock(&uobject->context->uobjects_lock);
}
static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
						  enum rdma_remove_reason why)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->type, struct uverbs_obj_idr_type,
			     type);
	int ret = idr_type->destroy_object(uobj, why);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object. In the rest of the cases, just remove whatever you can.
	 */
	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_idr_remove_uobj(uobj);

	return ret;
}
static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	struct file *filp = uobj->object;
	int id = uobj_file->uobj.id;

	/* Unsuccessful NEW */
	fput(filp);
	put_unused_fd(id);
}
static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
						 enum rdma_remove_reason why)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(uobj->type, struct uverbs_obj_fd_type, type);
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	int ret = fd_type->context_closed(uobj_file, why);

	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	if (why == RDMA_REMOVE_DURING_CLEANUP) {
		alloc_abort_fd_uobject(uobj);
		return ret;
	}

	uobj_file->uobj.context = NULL;
	return ret;
}
static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
	if (exclusive)
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
	else
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
#endif
}
static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
						    enum rdma_remove_reason why)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	ret = uobj->type->type_class->remove_commit(uobj, why);
	if (ret && why == RDMA_REMOVE_DESTROY) {
		/* We couldn't remove the object, so just unlock the uobject */
		atomic_set(&uobj->usecnt, 0);
		uobj->type->type_class->lookup_put(uobj, true);
	} else {
		mutex_lock(&ucontext->uobjects_lock);
		list_del(&uobj->list);
		mutex_unlock(&ucontext->uobjects_lock);
		/* put the ref we took when we created the object */
		uverbs_uobject_put(uobj);
	}

	return ret;
}
/* This is called only for user requested DESTROY reasons */
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	/* put the ref count we took at lookup_get */
	uverbs_uobject_put(uobj);
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	assert_uverbs_usecnt(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

	up_read(&ucontext->cleanup_rwsem);
	return ret;
}
static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
					     enum rdma_remove_reason why)
{
	return 0;
}

static const struct uverbs_obj_type null_obj_type = {
	.type_class = &((const struct uverbs_obj_type_class){
			.remove_commit = null_obj_type_class_remove_commit,
			/* be cautious */
			.needs_kfree_rcu = true}),
};
int rdma_explicit_destroy(struct ib_uobject *uobject)
{
	int ret;
	struct ib_ucontext *ucontext = uobject->context;

	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	assert_uverbs_usecnt(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
		goto out;

	uobject->type = &null_obj_type;

out:
	up_read(&ucontext->cleanup_rwsem);
	return ret;
}
static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	uverbs_uobject_add(uobj);
	spin_lock(&uobj->context->ufile->idr_lock);
	/*
	 * We already allocated this IDR with a NULL object, so
	 * this shouldn't fail.
	 */
	WARN_ON(idr_replace(&uobj->context->ufile->idr,
			    uobj, uobj->id));
	spin_unlock(&uobj->context->ufile->idr_lock);
}
static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);

	uverbs_uobject_add(&uobj_file->uobj);
	fd_install(uobj_file->uobj.id, uobj->object);
	/* This shouldn't be used anymore. Use the file object instead */
	uobj_file->uobj.id = 0;
	/* Get another reference as we export this to the fops */
	uverbs_uobject_get(&uobj_file->uobj);
}
int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
		int ret;

		WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
		ret = uobj->type->type_class->remove_commit(uobj,
							    RDMA_REMOVE_DURING_CLEANUP);
		if (ret)
			pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
				uobj->id);
		return ret;
	}

	/* matches atomic_set(-1) in alloc_uobj */
	assert_uverbs_usecnt(uobj, true);
	atomic_set(&uobj->usecnt, 0);

	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);

	return 0;
}
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	uverbs_idr_remove_uobj(uobj);
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_uobject_put(uobj);
}
void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
	uobj->type->type_class->alloc_abort(uobj);
}
static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{
	struct file *filp = uobj->object;

	WARN_ON(exclusive);
	/* This indirectly calls uverbs_close_fd and frees the object */
	fput(filp);
}
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
	assert_uverbs_usecnt(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	if (!exclusive)
		atomic_dec(&uobj->usecnt);
	else
		atomic_set(&uobj->usecnt, 0);

	uverbs_uobject_put(uobj);
}
const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.remove_commit = remove_commit_idr_uobject,
	/*
	 * When we destroy an object, we first just lock it for WRITE and
	 * actually DESTROY it in the finalize stage. So, the problematic
	 * scenario is when we just started the finalize stage of the
	 * destruction (nothing was executed yet). Now, the other thread
	 * fetched the object for READ access, but it didn't lock it yet.
	 * The DESTROY thread continues and starts destroying the object.
	 * When the other thread continues - without the RCU, it would
	 * access freed memory. However, the rcu_read_lock delays the free
	 * until the rcu_read_lock of the READ operation quits. Since the
	 * exclusive lock of the object is still taken by the DESTROY flow, the
	 * READ operation will get -EBUSY and it'll just bail out.
	 */
	.needs_kfree_rcu = true,
};
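
/*
 * Timeline of the DESTROY vs. READ race described above (illustrative):
 *
 *	DESTROY thread				READ thread
 *	--------------				-----------
 *	lookup_get(exclusive), usecnt = -1
 *						rcu_read_lock()
 *						idr_find() -> uobj
 *	remove_commit() -> idr_remove(),
 *	uverbs_uobject_put() -> kfree_rcu()
 *						kref_get_unless_zero() fails,
 *						or uverbs_try_lock_object()
 *						returns -EBUSY; READ bails out
 *						rcu_read_unlock()
 *	grace period ends -> kfree()
 *
 * The RCU read side keeps the memory valid until the READ side gives up.
 */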
static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
{
	struct ib_ucontext *ucontext;
	struct ib_uverbs_file *ufile = uobj_file->ufile;
	int ret;

	mutex_lock(&uobj_file->ufile->cleanup_mutex);

	/* uobject was either already cleaned up or is cleaned up right now anyway */
	if (!uobj_file->uobj.context ||
	    !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
		goto unlock;

	ucontext = uobj_file->uobj.context;
	ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
	up_read(&ucontext->cleanup_rwsem);
	if (ret)
		pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");

unlock:
	mutex_unlock(&ufile->cleanup_mutex);
}
void uverbs_close_fd(struct file *f)
{
	struct ib_uobject_file *uobj_file = f->private_data;
	struct kref *uverbs_file_ref = &uobj_file->ufile->ref;

	_uverbs_close_fd(uobj_file);
	uverbs_uobject_put(&uobj_file->uobj);
	kref_put(uverbs_file_ref, ib_uverbs_release_file);
}
void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
{
	enum rdma_remove_reason reason = device_removed ?
		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
	unsigned int cur_order = 0;

	ucontext->cleanup_reason = reason;
	/*
	 * Waits for all remove_commit and alloc_commit to finish. Logically, we
	 * want to hold this forever as the context is going to be destroyed,
	 * but we'll release it since it causes a "held lock freed" BUG message.
	 */
	down_write(&ucontext->cleanup_rwsem);

	while (!list_empty(&ucontext->uobjects)) {
		struct ib_uobject *obj, *next_obj;
		unsigned int next_order = UINT_MAX;

		/*
		 * This shouldn't run while executing other commands on this
		 * context. Thus, the only thing we should take care of is
		 * releasing a FD while traversing this list. The FD could be
		 * closed and released from the _release fop of this FD.
		 * In order to mitigate this, we add a lock.
		 * We take and release the lock per order traversal in order
		 * to let other threads (which might still use the FDs) a
		 * chance to run.
		 */
		mutex_lock(&ucontext->uobjects_lock);
		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
					 list) {
			if (obj->type->destroy_order == cur_order) {
				int ret;

				/*
				 * if we hit this WARN_ON, that means we are
				 * racing with a lookup_get.
				 */
				WARN_ON(uverbs_try_lock_object(obj, true));
				ret = obj->type->type_class->remove_commit(obj,
									   reason);
				list_del(&obj->list);
				if (ret)
					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
						obj->id, cur_order);
				/* put the ref we took when we created the object */
				uverbs_uobject_put(obj);
			} else {
				next_order = min(next_order,
						 obj->type->destroy_order);
			}
		}
		mutex_unlock(&ucontext->uobjects_lock);
		cur_order = next_order;
	}
	up_write(&ucontext->cleanup_rwsem);
}
void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
{
	ucontext->cleanup_reason = 0;
	mutex_init(&ucontext->uobjects_lock);
	INIT_LIST_HEAD(&ucontext->uobjects);
	init_rwsem(&ucontext->cleanup_rwsem);
}
const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.remove_commit = remove_commit_fd_uobject,
	.needs_kfree_rcu = false,
};
struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
						   struct ib_ucontext *ucontext,
						   enum uverbs_obj_access access,
						   int id)
{
	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
	case UVERBS_ACCESS_DESTROY:
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(type_attrs, ucontext);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}
int uverbs_finalize_object(struct ib_uobject *uobj,
			   enum uverbs_obj_access access,
			   bool commit)
{
	int ret = 0;

	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, false);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (commit)
			ret = rdma_remove_commit_uobject(uobj);
		else
			rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			ret = rdma_alloc_commit_uobject(uobj);
		else
			rdma_alloc_abort_uobject(uobj);
		break;
	default:
		WARN_ON(true);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
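
/*
 * Pairing sketch (illustrative only): every uobject obtained via
 * uverbs_get_uobject_from_context() must be released through
 * uverbs_finalize_object() with the same access mode; "commit" reflects
 * whether the method handler succeeded ("handler" below is hypothetical):
 *
 *	uobj = uverbs_get_uobject_from_context(type, ucontext,
 *					       UVERBS_ACCESS_NEW, id);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	ret = handler(uobj);
 *	uverbs_finalize_object(uobj, UVERBS_ACCESS_NEW, !ret);
 *	return ret;
 */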
int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
			    struct uverbs_attr_spec_hash * const *spec_hash,
			    size_t num,
			    bool commit)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < num; i++) {
		struct uverbs_attr_bundle_hash *curr_bundle =
			&attrs_bundle->hash[i];
		const struct uverbs_attr_spec_hash *curr_spec_bucket =
			spec_hash[i];
		unsigned int j;

		for (j = 0; j < curr_bundle->num_attrs; j++) {
			struct uverbs_attr *attr;
			const struct uverbs_attr_spec *spec;

			if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
				continue;

			attr = &curr_bundle->attrs[j];
			spec = &curr_spec_bucket->attrs[j];

			if (spec->type == UVERBS_ATTR_TYPE_IDR ||
			    spec->type == UVERBS_ATTR_TYPE_FD) {
				int current_ret;

				current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
								     spec->obj.access,
								     commit);
				if (!ret)
					ret = current_ret;
			}
		}
	}

	return ret;
}