/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

int uverbs_ns_idx(u16 *id, unsigned int ns_count)
{
	int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;

	if (ret >= ns_count)
		return -EINVAL;

	*id &= ~UVERBS_ID_NS_MASK;
	return ret;
}

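/*
 * Illustrative example (assuming UVERBS_ID_NS_MASK == 0xF000 and
 * UVERBS_ID_NS_SHIFT == 12, as defined in rdma/uverbs_ioctl.h): an id of
 * 0x1007 selects namespace bucket 1, and *id is masked down to 0x0007,
 * the index within that bucket.
 */
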
const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
						   uint16_t object)
{
	const struct uverbs_root_spec *object_hash = ibdev->specs_root;
	const struct uverbs_object_spec_hash *objects;
	int ret = uverbs_ns_idx(&object, object_hash->num_buckets);

	if (ret < 0)
		return NULL;

	objects = object_hash->object_buckets[ret];

	if (object >= objects->num_objects)
		return NULL;

	return objects->objects[object];
}

const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
						   uint16_t method)
{
	const struct uverbs_method_spec_hash *methods;
	int ret = uverbs_ns_idx(&method, object->num_buckets);

	if (ret < 0)
		return NULL;

	methods = object->method_buckets[ret];
	if (method >= methods->num_methods)
		return NULL;

	return methods->methods[method];
}

void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
	struct ib_uobject *uobj =
		container_of(ref, struct ib_uobject, ref);

	if (uobj->type->type_class->needs_kfree_rcu)
		kfree_rcu(uobj, rcu);
	else
		kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}

static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{
	/*
	 * When a shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments it.
	 * Exclusive access is required for operations like write or destroy.
	 * In exclusive access mode, we check that the counter is zero (nobody
	 * claimed this object) and we set it to -1. Releasing a shared access
	 * lock is done simply by decreasing the counter. As for exclusive
	 * access locks, since only a single one of them is allowed
	 * concurrently, setting the counter to zero is enough for releasing
	 * this lock.
	 */
	if (!exclusive)
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;

	/* lock is either WRITE or DESTROY - should be exclusive */
	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}

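/*
 * Summary of the usecnt states used above (an illustrative sketch of the
 * locking schema, restating the comment in uverbs_try_lock_object()):
 *
 *	 0   - unlocked
 *	 N>0 - N concurrent shared (read) holders
 *	-1   - held exclusively (write or destroy)
 */
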
static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
				     const struct uverbs_obj_type *type)
{
	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);

	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler; the object is added
	 * to the list in the commit stage.
	 */
	uobj->context = context;
	uobj->type = type;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject.
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&uobj->context->ufile->idr_lock);

	/*
	 * We start with allocating an idr pointing to NULL. This represents an
	 * object which isn't initialized yet. We'll replace it later on with
	 * the real object once we commit.
	 */
	ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&uobj->context->ufile->idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

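/*
 * Note: the NULL entry reserved above is only swapped for the real pointer
 * by alloc_commit_idr_uobject() (via idr_replace()), so a concurrent
 * lookup_get_idr_uobject() sees either NULL or a fully committed object,
 * never a half-initialized one.
 */
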
/*
 * It only removes the uobject from the idr; uverbs_uobject_put() is still
 * required.
 */
static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
{
	spin_lock(&uobj->context->ufile->idr_lock);
	idr_remove(&uobj->context->ufile->idr, uobj->id);
	spin_unlock(&uobj->context->ufile->idr_lock);
}

/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext,
						 int id, bool exclusive)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	/* object won't be released as we're protected in rcu */
	uobj = idr_find(&ucontext->ufile->idr, id);
	if (!uobj) {
		uobj = ERR_PTR(-ENOENT);
		goto free;
	}

	/*
	 * The idr_find is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after idr_remove goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);

free:
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
						struct ib_ucontext *ucontext,
						int id, bool exclusive)
{
	struct file *f;
	struct ib_uobject *uobject;
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);

	if (exclusive)
		return ERR_PTR(-EOPNOTSUPP);

	f = fget(id);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running uverbs_close_fd,
	 * and the caller is expected to ensure that uverbs_close_fd is never
	 * done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
					   struct ib_ucontext *ucontext,
					   int id, bool exclusive)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
	if (IS_ERR(uobj))
		return uobj;

	if (uobj->type != type) {
		ret = -EINVAL;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, exclusive);
	if (ret) {
		WARN(ucontext->cleanup_reason,
		     "ib_uverbs: Trying to lookup_get while cleanup context\n");
		goto free;
	}

	return uobj;
free:
	uobj->type->type_class->lookup_put(uobj, exclusive);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

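/*
 * Typical caller pairing (a sketch, with error handling elided):
 *
 *	uobj = rdma_lookup_get_uobject(type, ucontext, id, false);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	... use uobj->object under the shared lock ...
 *	rdma_lookup_put_uobject(uobj, false);
 */
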
static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
						  struct ib_ucontext *ucontext)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto idr_remove;

	return uobj;

idr_remove:
	uverbs_idr_remove_uobj(uobj);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct ib_uobject_file *uobj_file;
	struct file *filp;

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj)) {
		put_unused_fd(new_fd);
		return uobj;
	}

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	filp = anon_inode_getfile(fd_type->name,
				  fd_type->fops,
				  uobj_file,
				  fd_type->flags);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		uverbs_uobject_put(uobj);
		return ERR_CAST(filp);
	}

	uobj_file->uobj.id = new_fd;
	uobj_file->uobj.object = filp;
	uobj_file->ufile = ucontext->ufile;
	INIT_LIST_HEAD(&uobj->list);
	kref_get(&uobj_file->ufile->ref);

	return uobj;
}

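/*
 * Note that the file descriptor reserved above is not yet visible to
 * userspace: it is only installed into the fd table by
 * alloc_commit_fd_uobject() once the handler succeeds, and is returned
 * to the unused pool by alloc_abort_fd_uobject() otherwise.
 */
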
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
					    struct ib_ucontext *ucontext)
{
	return type->type_class->alloc_begin(type, ucontext);
}

static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
						  enum rdma_remove_reason why)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->type, struct uverbs_obj_idr_type,
			     type);
	int ret = idr_type->destroy_object(uobj, why);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object. In the rest of the cases, just remove whatever you can.
	 */
	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_idr_remove_uobj(uobj);

	return ret;
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	struct file *filp = uobj->object;
	int id = uobj_file->uobj.id;

	/* Unsuccessful NEW */
	fput(filp);
	put_unused_fd(id);
}

static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
						 enum rdma_remove_reason why)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(uobj->type, struct uverbs_obj_fd_type, type);
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	int ret = fd_type->context_closed(uobj_file, why);

	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	if (why == RDMA_REMOVE_DURING_CLEANUP) {
		alloc_abort_fd_uobject(uobj);
		return ret;
	}

	uobj_file->uobj.context = NULL;
	return ret;
}

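/*
 * Sanity check only: under CONFIG_LOCKDEP this verifies the usecnt
 * invariants established by uverbs_try_lock_object() (-1 for an exclusive
 * holder, a positive count for shared holders); otherwise it compiles to
 * nothing.
 */
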
static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
	if (exclusive)
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
	else
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
#endif
}

static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
						    enum rdma_remove_reason why)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	ret = uobj->type->type_class->remove_commit(uobj, why);
	if (ret && why == RDMA_REMOVE_DESTROY) {
		/* We couldn't remove the object, so just unlock the uobject */
		atomic_set(&uobj->usecnt, 0);
		uobj->type->type_class->lookup_put(uobj, true);
	} else {
		mutex_lock(&ucontext->uobjects_lock);
		list_del(&uobj->list);
		mutex_unlock(&ucontext->uobjects_lock);
		/* put the ref we took when we created the object */
		uverbs_uobject_put(uobj);
	}

	return ret;
}

/* This is called only for user requested DESTROY reasons */
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	/* put the ref count we took at lookup_get */
	uverbs_uobject_put(uobj);
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	assert_uverbs_usecnt(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

	up_read(&ucontext->cleanup_rwsem);
	return ret;
}

static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
					     enum rdma_remove_reason why)
{
	return 0;
}

static const struct uverbs_obj_type null_obj_type = {
	.type_class = &((const struct uverbs_obj_type_class){
			.remove_commit = null_obj_type_class_remove_commit,
			/* be cautious */
			.needs_kfree_rcu = true}),
};

int rdma_explicit_destroy(struct ib_uobject *uobject)
{
	int ret;
	struct ib_ucontext *ucontext = uobject->context;

	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	assert_uverbs_usecnt(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
		goto out;

	uobject->type = &null_obj_type;

out:
	up_read(&ucontext->cleanup_rwsem);
	return ret;
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	spin_lock(&uobj->context->ufile->idr_lock);
	/*
	 * We already allocated this IDR with a NULL object, so
	 * this shouldn't fail.
	 */
	WARN_ON(idr_replace(&uobj->context->ufile->idr,
			    uobj, uobj->id));
	spin_unlock(&uobj->context->ufile->idr_lock);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);

	fd_install(uobj_file->uobj.id, uobj->object);
	/* This shouldn't be used anymore. Use the file object instead */
	uobj_file->uobj.id = 0;
	/* Get another reference as we export this to the fops */
	uverbs_uobject_get(&uobj_file->uobj);
}

int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
		int ret;

		WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
		ret = uobj->type->type_class->remove_commit(uobj,
							    RDMA_REMOVE_DURING_CLEANUP);
		if (ret)
			pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
				uobj->id);
		return ret;
	}

	/* matches atomic_set(-1) in alloc_uobj */
	assert_uverbs_usecnt(uobj, true);
	atomic_set(&uobj->usecnt, 0);

	mutex_lock(&uobj->context->uobjects_lock);
	list_add(&uobj->list, &uobj->context->uobjects);
	mutex_unlock(&uobj->context->uobjects_lock);

	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);

	return 0;
}

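/*
 * Object creation lifecycle (a sketch; error handling elided):
 *
 *	uobj = rdma_alloc_begin_uobject(type, ucontext);
 *	... handler creates the HW object and sets uobj->object ...
 *	on failure:  rdma_alloc_abort_uobject(uobj);
 *	on success:  rdma_alloc_commit_uobject(uobj);  // publish to idr/fd
 */
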
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	uverbs_idr_remove_uobj(uobj);
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_uobject_put(uobj);
}

void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
	uobj->type->type_class->alloc_abort(uobj);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{
	struct file *filp = uobj->object;

	WARN_ON(exclusive);
	/* This indirectly calls uverbs_close_fd and frees the object */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
	assert_uverbs_usecnt(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	if (!exclusive)
		atomic_dec(&uobj->usecnt);
	else
		atomic_set(&uobj->usecnt, 0);

	uverbs_uobject_put(uobj);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.remove_commit = remove_commit_idr_uobject,
	/*
	 * When we destroy an object, we first just lock it for WRITE and
	 * actually DESTROY it in the finalize stage. So, the problematic
	 * scenario is when we just started the finalize stage of the
	 * destruction (nothing was executed yet). Now, the other thread
	 * fetched the object for READ access, but it didn't lock it yet.
	 * The DESTROY thread continues and starts destroying the object.
	 * When the other thread continues - without the RCU, it would
	 * access freed memory. However, the rcu_read_lock delays the free
	 * until the rcu_read_lock of the READ operation quits. Since the
	 * exclusive lock of the object is still taken by the DESTROY flow, the
	 * READ operation will get -EBUSY and it'll just bail out.
	 */
	.needs_kfree_rcu = true,
};

static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
{
	struct ib_ucontext *ucontext;
	struct ib_uverbs_file *ufile = uobj_file->ufile;
	int ret;

	mutex_lock(&uobj_file->ufile->cleanup_mutex);

	/* uobject was either already cleaned up or is cleaned up right now anyway */
	if (!uobj_file->uobj.context ||
	    !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
		goto unlock;

	ucontext = uobj_file->uobj.context;
	ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
	up_read(&ucontext->cleanup_rwsem);
	if (ret)
		pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
unlock:
	mutex_unlock(&ufile->cleanup_mutex);
}

void uverbs_close_fd(struct file *f)
{
	struct ib_uobject_file *uobj_file = f->private_data;
	struct kref *uverbs_file_ref = &uobj_file->ufile->ref;

	_uverbs_close_fd(uobj_file);
	uverbs_uobject_put(&uobj_file->uobj);
	kref_put(uverbs_file_ref, ib_uverbs_release_file);
}

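/*
 * uverbs_close_fd() is expected to be wired up as the ->release handler of
 * fd_type->fops for files created by alloc_begin_fd_uobject(); the last
 * fput() on such a file (for example the one in lookup_put_fd_uobject())
 * is what ends up invoking it.
 */
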
void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
{
	enum rdma_remove_reason reason = device_removed ?
		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
	unsigned int cur_order = 0;

	ucontext->cleanup_reason = reason;
	/*
	 * Waits for all remove_commit and alloc_commit to finish. Logically, we
	 * want to hold this forever as the context is going to be destroyed,
	 * but we'll release it since it causes a "held lock freed" BUG message.
	 */
	down_write(&ucontext->cleanup_rwsem);

	while (!list_empty(&ucontext->uobjects)) {
		struct ib_uobject *obj, *next_obj;
		unsigned int next_order = UINT_MAX;

		/*
		 * This shouldn't run while executing other commands on this
		 * context. Thus, the only thing we should take care of is
		 * releasing a FD while traversing this list. The FD could be
		 * closed and released from the _release fop of this FD.
		 * In order to mitigate this, we add a lock.
		 * We take and release the lock once per order traversal in
		 * order to give other threads (which might still use the FDs)
		 * a chance to run.
		 */
		mutex_lock(&ucontext->uobjects_lock);
		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
					 list) {
			if (obj->type->destroy_order == cur_order) {
				int ret;

				/*
				 * if we hit this WARN_ON, that means we are
				 * racing with a lookup_get.
				 */
				WARN_ON(uverbs_try_lock_object(obj, true));
				ret = obj->type->type_class->remove_commit(obj,
									   reason);
				list_del(&obj->list);
				if (ret)
					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
						obj->id, cur_order);
				/* put the ref we took when we created the object */
				uverbs_uobject_put(obj);
			} else {
				next_order = min(next_order,
						 obj->type->destroy_order);
			}
		}
		mutex_unlock(&ucontext->uobjects_lock);
		cur_order = next_order;
	}
	up_write(&ucontext->cleanup_rwsem);
}

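/*
 * Illustrative example of the ordered sweep above: if QP types declare a
 * lower destroy_order than their PD type, the first pass removes every QP
 * and only a later pass touches the PDs, so dependent objects are always
 * torn down before the objects they rely on. (Actual destroy_order values
 * are assigned per object type elsewhere.)
 */
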
void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
{
	ucontext->cleanup_reason = 0;
	mutex_init(&ucontext->uobjects_lock);
	INIT_LIST_HEAD(&ucontext->uobjects);
	init_rwsem(&ucontext->cleanup_rwsem);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.remove_commit = remove_commit_fd_uobject,
	.needs_kfree_rcu = false,
};

struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
						   struct ib_ucontext *ucontext,
						   enum uverbs_obj_access access,
						   int id)
{
	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
	case UVERBS_ACCESS_DESTROY:
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(type_attrs, ucontext);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

int uverbs_finalize_object(struct ib_uobject *uobj,
			   enum uverbs_obj_access access,
			   bool commit)
{
	int ret = 0;

	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, false);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (commit)
			ret = rdma_remove_commit_uobject(uobj);
		else
			rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			ret = rdma_alloc_commit_uobject(uobj);
		else
			rdma_alloc_abort_uobject(uobj);
		break;
	default:
		WARN_ON(true);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
			    struct uverbs_attr_spec_hash * const *spec_hash,
			    size_t num,
			    bool commit)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < num; i++) {
		struct uverbs_attr_bundle_hash *curr_bundle =
			&attrs_bundle->hash[i];
		const struct uverbs_attr_spec_hash *curr_spec_bucket =
			spec_hash[i];
		unsigned int j;

		for (j = 0; j < curr_bundle->num_attrs; j++) {
			struct uverbs_attr *attr;
			const struct uverbs_attr_spec *spec;

			if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
				continue;

			attr = &curr_bundle->attrs[j];
			spec = &curr_spec_bucket->attrs[j];

			if (spec->type == UVERBS_ATTR_TYPE_IDR ||
			    spec->type == UVERBS_ATTR_TYPE_FD) {
				int current_ret;

				current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
								     spec->obj.access,
								     commit);
				if (!ret)
					ret = current_ret;
			}
		}
	}

	return ret;
}