/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
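/*
 * Extract the namespace bucket index encoded in @id and strip the namespace
 * bits from it, so the remaining bits index into that bucket. Returns a
 * negative errno if the bucket is beyond @ns_count.
 */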
int uverbs_ns_idx(u16 *id, unsigned int ns_count)
{
        int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;

        if (ret >= ns_count)
                return -EINVAL;

        *id &= ~UVERBS_ID_NS_MASK;
        return ret;
}
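/*
 * Look up the object specification for @object in the per-device spec tree.
 * Returns NULL if the id does not map to a registered object.
 */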
const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
                                                   uint16_t object)
{
        const struct uverbs_root_spec *object_hash = ibdev->specs_root;
        const struct uverbs_object_spec_hash *objects;
        int ret = uverbs_ns_idx(&object, object_hash->num_buckets);

        if (ret < 0)
                return NULL;

        objects = object_hash->object_buckets[ret];

        if (object >= objects->num_objects)
                return NULL;

        return objects->objects[object];
}
const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
                                                   uint16_t method)
{
        const struct uverbs_method_spec_hash *methods;
        int ret = uverbs_ns_idx(&method, object->num_buckets);

        if (ret < 0)
                return NULL;

        methods = object->method_buckets[ret];
        if (method >= methods->num_methods)
                return NULL;

        return methods->methods[method];
}
void uverbs_uobject_get(struct ib_uobject *uobject)
{
        kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
        struct ib_uobject *uobj =
                container_of(ref, struct ib_uobject, ref);

        if (uobj->type->type_class->needs_kfree_rcu)
                kfree_rcu(uobj, rcu);
        else
                kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
        kref_put(&uobject->ref, uverbs_uobject_free);
}
static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{
        /*
         * When a shared access is required, we use a positive counter. Each
         * shared access request checks that the value != -1 and increments it.
         * Exclusive access is required for operations like write or destroy.
         * In exclusive access mode, we check that the counter is zero (nobody
         * claimed this object) and we set it to -1. Releasing a shared access
         * lock is done simply by decreasing the counter. As for exclusive
         * access locks, since only a single one of them is allowed
         * concurrently, setting the counter to zero is enough for releasing
         * this lock.
         */
        if (!exclusive)
                return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
                        -EBUSY : 0;

        /* lock is either WRITE or DESTROY - should be exclusive */
        return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}
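/*
 * Allocate a zeroed uobject of the size requested by the type and take the
 * first kref on it. The object is not yet visible to user space; it only
 * becomes visible at the commit stage.
 */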
static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
                                     const struct uverbs_obj_type *type)
{
        struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);

        if (!uobj)
                return ERR_PTR(-ENOMEM);
        /*
         * user_handle should be filled by the handler.
         * The object is added to the list in the commit stage.
         */
        uobj->context = context;
        uobj->type = type;
        atomic_set(&uobj->usecnt, 0);
        kref_init(&uobj->ref);

        return uobj;
}
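/*
 * Reserve an idr entry for @uobj under the ufile's idr_lock. The entry
 * initially points to NULL and is only replaced with the real object once
 * the allocation is committed.
 */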
static int idr_add_uobj(struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&uobj->context->ufile->idr_lock);

        /*
         * We start with allocating an idr pointing to NULL. This represents an
         * object which isn't initialized yet. We'll replace it later on with
         * the real object once we commit.
         */
        ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
                        min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&uobj->context->ufile->idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}
/*
 * It only removes it from the uobjects list, uverbs_uobject_put() is still
 * required.
 */
static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
{
        spin_lock(&uobj->context->ufile->idr_lock);
        idr_remove(&uobj->context->ufile->idr, uobj->id);
        spin_unlock(&uobj->context->ufile->idr_lock);
}
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
                                                 struct ib_ucontext *ucontext,
                                                 int id, bool exclusive)
{
        struct ib_uobject *uobj;

        rcu_read_lock();
        /* object won't be released as we're protected in rcu */
        uobj = idr_find(&ucontext->ufile->idr, id);
        if (!uobj) {
                uobj = ERR_PTR(-ENOENT);
                goto free;
        }

        uverbs_uobject_get(uobj);
free:
        rcu_read_unlock();
        return uobj;
}
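/*
 * fd based objects can only be looked up for shared (read) access: the
 * struct file reference taken here keeps the uobject alive while the
 * handler runs.
 */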
static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
                                                struct ib_ucontext *ucontext,
                                                int id, bool exclusive)
{
        struct file *f;
        struct ib_uobject *uobject;
        const struct uverbs_obj_fd_type *fd_type =
                container_of(type, struct uverbs_obj_fd_type, type);

        if (exclusive)
                return ERR_PTR(-EOPNOTSUPP);

        f = fget(id);
        if (!f)
                return ERR_PTR(-EBADF);

        uobject = f->private_data;
        /*
         * fget(id) ensures we are not currently running uverbs_close_fd,
         * and the caller is expected to ensure that uverbs_close_fd is never
         * done while a call to lookup is possible.
         */
        if (f->f_op != fd_type->fops) {
                fput(f);
                return ERR_PTR(-EBADF);
        }

        uverbs_uobject_get(uobject);
        return uobject;
}
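/*
 * Look up a uobject by id, verify it is of the expected type and lock it
 * for either shared or exclusive access. On failure the reference taken by
 * the type specific lookup_get is dropped again.
 */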
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
                                           struct ib_ucontext *ucontext,
                                           int id, bool exclusive)
{
        struct ib_uobject *uobj;
        int ret;

        uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
        if (IS_ERR(uobj))
                return uobj;

        if (uobj->type != type) {
                ret = -EINVAL;
                goto free;
        }

        ret = uverbs_try_lock_object(uobj, exclusive);
        if (ret) {
                WARN(ucontext->cleanup_reason,
                     "ib_uverbs: Trying to lookup_get while cleanup context\n");
                goto free;
        }

        return uobj;
free:
        uobj->type->type_class->lookup_put(uobj, exclusive);
        uverbs_uobject_put(uobj);
        return ERR_PTR(ret);
}
static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
                                                  struct ib_ucontext *ucontext)
{
        int ret;
        struct ib_uobject *uobj;

        uobj = alloc_uobj(ucontext, type);
        if (IS_ERR(uobj))
                return uobj;

        ret = idr_add_uobj(uobj);
        if (ret)
                goto uobj_put;

        ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
                                   RDMACG_RESOURCE_HCA_OBJECT);
        if (ret)
                goto idr_remove;

        return uobj;

idr_remove:
        uverbs_idr_remove_uobj(uobj);
uobj_put:
        uverbs_uobject_put(uobj);
        return ERR_PTR(ret);
}
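/*
 * Allocate an fd based uobject: reserve a file descriptor, allocate the
 * uobject and back it with an anonymous inode file. The fd is only
 * installed into the fd table at the commit stage.
 */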
static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
                                                 struct ib_ucontext *ucontext)
{
        const struct uverbs_obj_fd_type *fd_type =
                container_of(type, struct uverbs_obj_fd_type, type);
        int new_fd;
        struct ib_uobject *uobj;
        struct ib_uobject_file *uobj_file;
        struct file *filp;

        new_fd = get_unused_fd_flags(O_CLOEXEC);
        if (new_fd < 0)
                return ERR_PTR(new_fd);

        uobj = alloc_uobj(ucontext, type);
        if (IS_ERR(uobj)) {
                put_unused_fd(new_fd);
                return uobj;
        }

        uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
        filp = anon_inode_getfile(fd_type->name,
                                  fd_type->fops,
                                  uobj_file,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
                put_unused_fd(new_fd);
                uverbs_uobject_put(uobj);
                return ERR_CAST(filp);
        }

        uobj_file->uobj.id = new_fd;
        uobj_file->uobj.object = filp;
        uobj_file->ufile = ucontext->ufile;
        INIT_LIST_HEAD(&uobj->list);
        kref_get(&uobj_file->ufile->ref);

        return uobj;
}
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
                                            struct ib_ucontext *ucontext)
{
        return type->type_class->alloc_begin(type, ucontext);
}

static void uverbs_uobject_add(struct ib_uobject *uobject)
{
        mutex_lock(&uobject->context->uobjects_lock);
        list_add(&uobject->list, &uobject->context->uobjects);
        mutex_unlock(&uobject->context->uobjects_lock);
}
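/*
 * Destroy the underlying HCA object, uncharge it from the rdma cgroup and
 * drop the idr entry. A failure is only propagated for user requested
 * destroys; for all other removal reasons we clean up whatever we can.
 */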
static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
                                                  enum rdma_remove_reason why)
{
        const struct uverbs_obj_idr_type *idr_type =
                container_of(uobj->type, struct uverbs_obj_idr_type,
                             type);
        int ret = idr_type->destroy_object(uobj, why);

        /*
         * We can only fail gracefully if the user requested to destroy the
         * object. In the rest of the cases, just remove whatever you can.
         */
        if (why == RDMA_REMOVE_DESTROY && ret)
                return ret;

        ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
                           RDMACG_RESOURCE_HCA_OBJECT);
        uverbs_idr_remove_uobj(uobj);

        return ret;
}
static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
        struct ib_uobject_file *uobj_file =
                container_of(uobj, struct ib_uobject_file, uobj);
        struct file *filp = uobj->object;
        int id = uobj_file->uobj.id;

        /* Unsuccessful NEW */
        fput(filp);
        put_unused_fd(id);
}
static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
                                                 enum rdma_remove_reason why)
{
        const struct uverbs_obj_fd_type *fd_type =
                container_of(uobj->type, struct uverbs_obj_fd_type, type);
        struct ib_uobject_file *uobj_file =
                container_of(uobj, struct ib_uobject_file, uobj);
        int ret = fd_type->context_closed(uobj_file, why);

        if (why == RDMA_REMOVE_DESTROY && ret)
                return ret;

        if (why == RDMA_REMOVE_DURING_CLEANUP) {
                alloc_abort_fd_uobject(uobj);
                return ret;
        }

        uobj_file->uobj.context = NULL;
        return ret;
}
static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
        if (exclusive)
                WARN_ON(atomic_read(&uobj->usecnt) > 0);
        else
                WARN_ON(atomic_read(&uobj->usecnt) == -1);
#endif
}
static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
                                                    enum rdma_remove_reason why)
{
        int ret;
        struct ib_ucontext *ucontext = uobj->context;

        ret = uobj->type->type_class->remove_commit(uobj, why);
        if (ret && why == RDMA_REMOVE_DESTROY) {
                /* We couldn't remove the object, so just unlock the uobject */
                atomic_set(&uobj->usecnt, 0);
                uobj->type->type_class->lookup_put(uobj, true);
        } else {
                mutex_lock(&ucontext->uobjects_lock);
                list_del(&uobj->list);
                mutex_unlock(&ucontext->uobjects_lock);
                /* put the ref we took when we created the object */
                uverbs_uobject_put(uobj);
        }

        return ret;
}
/* This is called only for user requested DESTROY reasons */
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
{
        int ret;
        struct ib_ucontext *ucontext = uobj->context;

        /* put the ref count we took at lookup_get */
        uverbs_uobject_put(uobj);
        /* Cleanup is running. Calling this should have been impossible */
        if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
                WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
                return 0;
        }
        lockdep_check(uobj, true);
        ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

        up_read(&ucontext->cleanup_rwsem);
        return ret;
}
static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
                                             enum rdma_remove_reason why)
{
        return 0;
}

static const struct uverbs_obj_type null_obj_type = {
        .type_class = &((const struct uverbs_obj_type_class){
                        .remove_commit = null_obj_type_class_remove_commit,
                        /* be cautious */
                        .needs_kfree_rcu = true}),
};
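/*
 * Destroy an object on explicit user request while it is still held with
 * exclusive access. The uobject itself stays allocated but is switched to
 * the null type, so the later finalize/cleanup paths have nothing left to do.
 */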
int rdma_explicit_destroy(struct ib_uobject *uobject)
{
        int ret;
        struct ib_ucontext *ucontext = uobject->context;

        /* Cleanup is running. Calling this should have been impossible */
        if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
                WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
                return 0;
        }
        lockdep_check(uobject, true);
        ret = uobject->type->type_class->remove_commit(uobject,
                                                       RDMA_REMOVE_DESTROY);
        if (ret)
                goto out;

        uobject->type = &null_obj_type;

out:
        up_read(&ucontext->cleanup_rwsem);
        return ret;
}
static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
        uverbs_uobject_add(uobj);
        spin_lock(&uobj->context->ufile->idr_lock);
        /*
         * We already allocated this IDR with a NULL object, so
         * this shouldn't fail.
         */
        WARN_ON(idr_replace(&uobj->context->ufile->idr,
                            uobj, uobj->id));
        spin_unlock(&uobj->context->ufile->idr_lock);
}
static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
        struct ib_uobject_file *uobj_file =
                container_of(uobj, struct ib_uobject_file, uobj);

        uverbs_uobject_add(&uobj_file->uobj);
        fd_install(uobj_file->uobj.id, uobj->object);
        /* This shouldn't be used anymore. Use the file object instead */
        uobj_file->uobj.id = 0;
        /* Get another reference as we export this to the fops */
        uverbs_uobject_get(&uobj_file->uobj);
}
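/*
 * Make a newly allocated uobject visible to user space. If context cleanup
 * has already started, the allocation is rolled back instead of committed.
 */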
int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
        /* Cleanup is running. Calling this should have been impossible */
        if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
                int ret;

                WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
                ret = uobj->type->type_class->remove_commit(uobj,
                                                            RDMA_REMOVE_DURING_CLEANUP);
                if (ret)
                        pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
                                uobj->id);
                return ret;
        }

        uobj->type->type_class->alloc_commit(uobj);
        up_read(&uobj->context->cleanup_rwsem);

        return 0;
}
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
        uverbs_idr_remove_uobj(uobj);
        ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
                           RDMACG_RESOURCE_HCA_OBJECT);
        uverbs_uobject_put(uobj);
}

void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
        uobj->type->type_class->alloc_abort(uobj);
}
static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{
        struct file *filp = uobj->object;

        WARN_ON(exclusive);
        /* This indirectly calls uverbs_close_fd and frees the object */
        fput(filp);
}
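/*
 * Drop the lock taken at lookup_get and release the reference that the
 * lookup took on the uobject.
 */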
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
        lockdep_check(uobj, exclusive);
        uobj->type->type_class->lookup_put(uobj, exclusive);
        /*
         * In order to unlock an object, either decrease its usecnt for
         * read access or zero it in case of exclusive access. See
         * uverbs_try_lock_object for locking schema information.
         */
        if (!exclusive)
                atomic_dec(&uobj->usecnt);
        else
                atomic_set(&uobj->usecnt, 0);

        uverbs_uobject_put(uobj);
}
const struct uverbs_obj_type_class uverbs_idr_class = {
        .alloc_begin = alloc_begin_idr_uobject,
        .lookup_get = lookup_get_idr_uobject,
        .alloc_commit = alloc_commit_idr_uobject,
        .alloc_abort = alloc_abort_idr_uobject,
        .lookup_put = lookup_put_idr_uobject,
        .remove_commit = remove_commit_idr_uobject,
        /*
         * When we destroy an object, we first just lock it for WRITE and
         * actually DESTROY it in the finalize stage. So, the problematic
         * scenario is when we just started the finalize stage of the
         * destruction (nothing was executed yet). Now, the other thread
         * fetched the object for READ access, but it didn't lock it yet.
         * The DESTROY thread continues and starts destroying the object.
         * When the other thread continues - without the RCU, it would
         * access freed memory. However, the rcu_read_lock delays the free
         * until the rcu_read_lock of the READ operation quits. Since the
         * exclusive lock of the object is still taken by the DESTROY flow, the
         * READ operation will get -EBUSY and it'll just bail out.
         */
        .needs_kfree_rcu = true,
};
static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
{
        struct ib_ucontext *ucontext;
        struct ib_uverbs_file *ufile = uobj_file->ufile;
        int ret;

        mutex_lock(&uobj_file->ufile->cleanup_mutex);

        /* uobject was either already cleaned up or is cleaned up right now anyway */
        if (!uobj_file->uobj.context ||
            !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
                goto unlock;

        ucontext = uobj_file->uobj.context;
        ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
        up_read(&ucontext->cleanup_rwsem);
        if (ret)
                pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");

unlock:
        mutex_unlock(&ufile->cleanup_mutex);
}
void uverbs_close_fd(struct file *f)
{
        struct ib_uobject_file *uobj_file = f->private_data;
        struct kref *uverbs_file_ref = &uobj_file->ufile->ref;

        _uverbs_close_fd(uobj_file);
        uverbs_uobject_put(&uobj_file->uobj);
        kref_put(uverbs_file_ref, ib_uverbs_release_file);
}
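/*
 * Tear down every uobject that is still attached to the context, walking
 * the list in rounds of increasing destroy_order so that objects with a
 * lower destroy_order are released first. Called when the context is
 * destroyed or the device is being removed.
 */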
void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
{
        enum rdma_remove_reason reason = device_removed ?
                RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
        unsigned int cur_order = 0;

        ucontext->cleanup_reason = reason;
        /*
         * Waits for all remove_commit and alloc_commit to finish. Logically, we
         * want to hold this forever as the context is going to be destroyed,
         * but we'll release it since it causes a "held lock freed" BUG message.
         */
        down_write(&ucontext->cleanup_rwsem);

        while (!list_empty(&ucontext->uobjects)) {
                struct ib_uobject *obj, *next_obj;
                unsigned int next_order = UINT_MAX;

                /*
                 * This shouldn't run while executing other commands on this
                 * context. Thus, the only thing we should take care of is
                 * releasing a FD while traversing this list. The FD could be
                 * closed and released from the _release fop of this FD.
                 * In order to mitigate this, we add a lock.
                 * We take and release the lock per order traversal in order
                 * to let other threads (which might still use the FDs) a
                 * chance to run.
                 */
                mutex_lock(&ucontext->uobjects_lock);
                list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
                                         list) {
                        if (obj->type->destroy_order == cur_order) {
                                int ret;

                                /*
                                 * if we hit this WARN_ON, that means we are
                                 * racing with a lookup_get.
                                 */
                                WARN_ON(uverbs_try_lock_object(obj, true));
                                ret = obj->type->type_class->remove_commit(obj,
                                                                           reason);
                                list_del(&obj->list);
                                if (ret)
                                        pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
                                                obj->id, cur_order);
                                /* put the ref we took when we created the object */
                                uverbs_uobject_put(obj);
                        } else {
                                next_order = min(next_order,
                                                 obj->type->destroy_order);
                        }
                }
                mutex_unlock(&ucontext->uobjects_lock);
                cur_order = next_order;
        }
        up_write(&ucontext->cleanup_rwsem);
}
void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
{
        ucontext->cleanup_reason = 0;
        mutex_init(&ucontext->uobjects_lock);
        INIT_LIST_HEAD(&ucontext->uobjects);
        init_rwsem(&ucontext->cleanup_rwsem);
}
const struct uverbs_obj_type_class uverbs_fd_class = {
        .alloc_begin = alloc_begin_fd_uobject,
        .lookup_get = lookup_get_fd_uobject,
        .alloc_commit = alloc_commit_fd_uobject,
        .alloc_abort = alloc_abort_fd_uobject,
        .lookup_put = lookup_put_fd_uobject,
        .remove_commit = remove_commit_fd_uobject,
        .needs_kfree_rcu = false,
};
struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
                                                   struct ib_ucontext *ucontext,
                                                   enum uverbs_obj_access access,
                                                   int id)
{
        switch (access) {
        case UVERBS_ACCESS_READ:
                return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
        case UVERBS_ACCESS_DESTROY:
        case UVERBS_ACCESS_WRITE:
                return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
        case UVERBS_ACCESS_NEW:
                return rdma_alloc_begin_uobject(type_attrs, ucontext);
        default:
                WARN_ON(true);
                return ERR_PTR(-EOPNOTSUPP);
        }
}
int uverbs_finalize_object(struct ib_uobject *uobj,
                           enum uverbs_obj_access access,
                           bool commit)
{
        int ret = 0;

        /*
         * refcounts should be handled at the object level and not at the
         * uobject level. Refcounts of the objects themselves are done in
         * handlers.
         */

        switch (access) {
        case UVERBS_ACCESS_READ:
                rdma_lookup_put_uobject(uobj, false);
                break;
        case UVERBS_ACCESS_WRITE:
                rdma_lookup_put_uobject(uobj, true);
                break;
        case UVERBS_ACCESS_DESTROY:
                if (commit)
                        ret = rdma_remove_commit_uobject(uobj);
                else
                        rdma_lookup_put_uobject(uobj, true);
                break;
        case UVERBS_ACCESS_NEW:
                if (commit)
                        ret = rdma_alloc_commit_uobject(uobj);
                else
                        rdma_alloc_abort_uobject(uobj);
                break;
        default:
                WARN_ON(true);
                ret = -EOPNOTSUPP;
        }

        return ret;
}
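/*
 * Walk every attribute bundle that referenced a uobject (idr or fd based)
 * and finalize it, committing or aborting the whole set. The first error
 * is reported, but finalization is still attempted for all objects.
 */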
int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
                            struct uverbs_attr_spec_hash * const *spec_hash,
                            size_t num,
                            bool commit)
{
        unsigned int i;
        int ret = 0;

        for (i = 0; i < num; i++) {
                struct uverbs_attr_bundle_hash *curr_bundle =
                        &attrs_bundle->hash[i];
                const struct uverbs_attr_spec_hash *curr_spec_bucket =
                        spec_hash[i];
                unsigned int j;

                for (j = 0; j < curr_bundle->num_attrs; j++) {
                        struct uverbs_attr *attr;
                        const struct uverbs_attr_spec *spec;

                        if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
                                continue;

                        attr = &curr_bundle->attrs[j];
                        spec = &curr_spec_bucket->attrs[j];

                        if (spec->type == UVERBS_ATTR_TYPE_IDR ||
                            spec->type == UVERBS_ATTR_TYPE_FD) {
                                int current_ret;

                                current_ret = uverbs_finalize_object(attr->obj_attr.uobject,