/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include <linux/bitops.h>
#include "uverbs.h"

#define UVERBS_NUM_NS (UVERBS_ID_NS_MASK >> UVERBS_ID_NS_SHIFT)
#define GET_NS_ID(idx) (((idx) & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT)
#define GET_ID(idx) ((idx) & ~UVERBS_ID_NS_MASK)

#define _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset, \
			  buckets_offset) \
	for (tmpj = 0, \
	     elem = (*(const void ***)((hashes)[tmpi] + \
				       (buckets_offset)))[0]; \
	     tmpj < *(size_t *)((hashes)[tmpi] + (num_buckets_offset)); \
	     tmpj++) \
		if ((elem = ((*(const void ***)(hashes[tmpi] + \
						(buckets_offset)))[tmpj])))

/*
 * Iterate all elements of a few @hashes. The number of given hashes is
 * indicated by @num_hashes. The offset of the number of buckets in the hash is
 * represented by @num_buckets_offset, while the offset of the buckets array in
 * the hash structure is represented by @buckets_offset. tmpi and tmpj are two
 * short (or int) based indices that are given by the user. tmpi iterates over
 * the different hashes. @elem points to the current element in the
 * hashes[tmpi] bucket we are looping on. Strictly speaking, the @hashes
 * representation isn't exactly a hash, but more a collection of elements.
 * These elements' ids are treated in a hash-like manner, where the upper bits
 * of an id are the bucket number. These elements are later mapped into a
 * perfect-hash.
 */
#define for_each_element(elem, tmpi, tmpj, hashes, num_hashes, \
			 num_buckets_offset, buckets_offset) \
	for (tmpi = 0; tmpi < (num_hashes); tmpi++) \
		_for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset,\
				  buckets_offset)
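
/*
 * Illustrative usage sketch (not part of the original flow): walking every
 * attribute definition of a set of method trees could look roughly like the
 * snippet below. The variables method_defs/num_defs are hypothetical; the
 * field names num_attrs/attrs are the ones the get_attrs_above_id() macro
 * below relies on.
 *
 *	const void *elem;
 *	int i, j;
 *
 *	for_each_element(elem, i, j, (const void **)method_defs, num_defs,
 *			 offsetof(struct uverbs_method_def, num_attrs),
 *			 offsetof(struct uverbs_method_def, attrs))
 *		pr_debug("attr id %u\n",
 *			 ((const struct uverbs_attr_def *)elem)->id);
 */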

#define get_elements_iterators_entry_above(iters, num_elements, elements, \
					   num_objects_fld, objects_fld, bucket,\
					   min_id) \
	get_elements_above_id((const void **)iters, num_elements, \
			      (const void **)(elements), \
			      offsetof(typeof(**elements), \
				       num_objects_fld), \
			      offsetof(typeof(**elements), objects_fld), \
			      offsetof(typeof(***(*elements)->objects_fld), id),\
			      bucket, min_id)

#define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \
	get_elements_iterators_entry_above(iters, num_trees, trees, \
					   num_objects, objects, bucket, min_id)

#define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id) \
	get_elements_iterators_entry_above(method_iters, num_iters, iters, \
					   num_methods, methods, bucket, min_id)

#define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id) \
	get_elements_iterators_entry_above(attrs_iters, num_iters, iters, \
					   num_attrs, attrs, bucket, min_id)

/*
 * get_elements_above_id gets a few hashes represented by @elements and
 * @num_elements. The hashes' fields are described by @num_offset, @data_offset
 * and @id_offset in the same way as required by for_each_element. The function
 * fills the array @iters with the elements in the hashes' buckets whose ids
 * are the smallest ids in all hashes, yet are all larger than the id given by
 * @min_id. Elements are only added to the iters array if their id belongs to
 * the bucket @bucket. The number of elements in the returned array is returned
 * by the function. @min_id is also updated to reflect the new min_id of all
 * elements in iters.
 */
static size_t get_elements_above_id(const void **iters,
				    unsigned int num_elements,
				    const void **elements,
				    size_t num_offset,
				    size_t data_offset,
				    size_t id_offset,
				    u16 bucket,
				    short *min_id)
{
	size_t num_iters = 0;
	short min = SHRT_MAX;
	const void *elem;
	int i, j, last_stored = -1;

	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
		u16 id = *(u16 *)(elem + id_offset);

		if (GET_NS_ID(id) != bucket)
			continue;

		if (GET_ID(id) < *min_id ||
		    (min != SHRT_MAX && GET_ID(id) > min))
			continue;

		/*
		 * We first iterate all hashes represented by @elements. When
		 * we do, we try to find an element @elem in the bucket
		 * @bucket whose id is the minimum. Since we can't ensure the
		 * user sorted the elements in increasing order, we override
		 * this hash's minimal id element we found, if a new element
		 * with a smaller id was just found.
		 */
		iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
		last_stored = i;
		min = GET_ID(id);
	}

	/*
	 * We only insert an element into our iters array if its id is smaller
	 * than all previously seen ids. Therefore, the final iters array is
	 * sorted so that the smallest ids are at the end of the array.
	 * Hence, we need to trim the beginning of the array to make sure
	 * the ids of all remaining elements are equal to min.
	 */
	for (i = num_iters - 1; i >= 0 &&
	     GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
		;

	num_iters -= i + 1;
	memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);

	*min_id = min;
	return num_iters;
}
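
/*
 * Worked example (illustrative, not from the original file): assume three
 * element hashes whose ids in bucket 0 are {1, 3}, {2, 3} and {3}. The callers
 * below start with min_id = SHRT_MIN and bump it past each returned batch, so
 * successive calls yield:
 *   - one iterator for id 1 (only the first hash has it), *min_id becomes 1
 *   - one iterator for id 2 (only the second hash has it), *min_id becomes 2
 *   - three iterators for id 3 (every hash has it), *min_id becomes 3
 *   - zero iterators, which the callers use as their loop exit condition.
 */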

static short find_max_element_ns_id(unsigned int num_elements,
				    const void **elements,
				    size_t num_offset,
				    size_t data_offset,
				    size_t id_offset)
{
	short max_ns = SHRT_MIN;
	const void *elem;
	int i, j;

	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
		u16 id = *(u16 *)(elem + id_offset);

		if (GET_NS_ID(id) > max_ns)
			max_ns = GET_NS_ID(id);
	}

	return max_ns;
}

static short find_max_element_id(unsigned int num_elements,
				 const void **elements,
				 size_t num_offset,
				 size_t data_offset,
				 size_t id_offset,
				 u16 bucket)
{
	short max_id = SHRT_MIN;
	const void *elem;
	int i, j;

	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
		u16 id = *(u16 *)(elem + id_offset);

		if (GET_NS_ID(id) == bucket &&
		    GET_ID(id) > max_id)
			max_id = GET_ID(id);
	}

	return max_id;
}

#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
				  objects_fld, bucket) \
	find_max_element_id(num_elements, (const void **)(elements), \
			    offsetof(typeof(**elements), num_objects_fld), \
			    offsetof(typeof(**elements), objects_fld), \
			    offsetof(typeof(***(*elements)->objects_fld), id), \
			    bucket)

#define find_max_element_ns_entry_id(num_elements, elements, \
				     num_objects_fld, objects_fld) \
	find_max_element_ns_id(num_elements, (const void **)(elements), \
			       offsetof(typeof(**elements), num_objects_fld), \
			       offsetof(typeof(**elements), objects_fld), \
			       offsetof(typeof(***(*elements)->objects_fld), id))

/*
 * find_max_xxxx_ns_id gets a few elements. Each element is described by an id
 * whose upper bits represent a namespace. It finds the max namespace. This
 * could be used in order to know how many buckets we need to allocate. If no
 * elements exist, SHRT_MIN is returned. A namespace here represents different
 * buckets. The common example is "common bucket" and "driver bucket".
 *
 * find_max_xxxx_id gets a few elements and a bucket. Each element is described
 * by an id whose upper bits represent a namespace. It returns the max id
 * which is contained in the same namespace defined in @bucket. This could be
 * used in order to know how many elements we need to allocate in the bucket.
 * If no elements exist, SHRT_MIN is returned.
 */

#define find_max_object_id(num_trees, trees, bucket) \
		find_max_element_entry_id(num_trees, trees, num_objects, \
					  objects, bucket)
#define find_max_object_ns_id(num_trees, trees) \
		find_max_element_ns_entry_id(num_trees, trees, \
					     num_objects, objects)

#define find_max_method_id(num_iters, iters, bucket) \
		find_max_element_entry_id(num_iters, iters, num_methods, \
					  methods, bucket)
#define find_max_method_ns_id(num_iters, iters) \
		find_max_element_ns_entry_id(num_iters, iters, \
					     num_methods, methods)

#define find_max_attr_id(num_iters, iters, bucket) \
		find_max_element_entry_id(num_iters, iters, num_attrs, \
					  attrs, bucket)
#define find_max_attr_ns_id(num_iters, iters) \
		find_max_element_ns_entry_id(num_iters, iters, \
					     num_attrs, attrs)
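
/*
 * Illustrative example (not part of the original file): suppose the merged
 * trees contain three objects whose (GET_NS_ID, GET_ID) pairs are (0, 1),
 * (0, 3) and (1, 2). Then find_max_object_ns_id() returns 1, so two object
 * buckets are allocated, while find_max_object_id(..., 0) returns 3 and
 * find_max_object_id(..., 1) returns 2, which size each bucket's array below.
 */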

static void free_method(struct uverbs_method_spec *method)
{
	unsigned int i;

	if (!method)
		return;

	for (i = 0; i < method->num_buckets; i++)
		kfree(method->attr_buckets[i]);

	kfree(method);
}

#define IS_ATTR_OBJECT(attr) ((attr)->type == UVERBS_ATTR_TYPE_IDR || \
			      (attr)->type == UVERBS_ATTR_TYPE_FD)

/*
 * This function gets an array of size @num_method_defs which contains pointers
 * to method definitions @method_defs. The function allocates an
 * uverbs_method_spec structure and initializes its number of buckets and the
 * elements in the buckets to the correct attributes. While doing that, it
 * validates that there aren't conflicts between attributes of different
 * method_defs.
 */
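
/*
 * Merge example (illustrative; the method name is hypothetical): if two trees
 * both define UVERBS_METHOD_X and each adds different attributes, the
 * resulting uverbs_method_spec contains the union of those attributes. Two
 * definitions of the very same attribute id, however, are rejected below.
 */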
static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_method_def **method_defs,
							   size_t num_method_defs)
{
	int bucket_idx;
	int max_attr_buckets = 0;
	size_t num_attr_buckets = 0;
	int res = 0;
	struct uverbs_method_spec *method = NULL;
	const struct uverbs_attr_def **attr_defs;
	unsigned int num_of_singularities = 0;

	max_attr_buckets = find_max_attr_ns_id(num_method_defs, method_defs);
	if (max_attr_buckets >= 0)
		num_attr_buckets = max_attr_buckets + 1;

	method = kzalloc(sizeof(*method) +
			 num_attr_buckets * sizeof(*method->attr_buckets),
			 GFP_KERNEL);
	if (!method)
		return ERR_PTR(-ENOMEM);

	method->num_buckets = num_attr_buckets;
	attr_defs = kcalloc(num_method_defs, sizeof(*attr_defs), GFP_KERNEL);
	if (!attr_defs) {
		res = -ENOMEM;
		goto free_method;
	}

	for (bucket_idx = 0; bucket_idx < method->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		int attr_max_bucket = 0;
		struct uverbs_attr_spec_hash *hash = NULL;

		attr_max_bucket = find_max_attr_id(num_method_defs, method_defs,
						   bucket_idx);
		if (attr_max_bucket < 0)
			continue;

		hash = kzalloc(sizeof(*hash) +
			       ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
				     sizeof(long)) +
			       BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}
		hash->num_attrs = attr_max_bucket + 1;
		method->num_child_attrs += hash->num_attrs;
		hash->mandatory_attrs_bitmask = (void *)(hash + 1) +
						ALIGN(sizeof(*hash->attrs) *
						      (attr_max_bucket + 1),
						      sizeof(long));

		method->attr_buckets[bucket_idx] = hash;

		do {
			size_t num_attr_defs;
			struct uverbs_attr_spec *attr;
			bool attr_obj_with_special_access;

			num_attr_defs =
				get_attrs_above_id(attr_defs,
						   num_method_defs,
						   method_defs,
						   bucket_idx,
						   &min_id);
			/* Last attr in bucket */
			if (!num_attr_defs)
				break;

			if (num_attr_defs > 1) {
				/*
				 * We don't allow two attribute definitions for
				 * the same attribute. This is usually a
				 * programmer error. If required, it's better to
				 * just add a new attribute to capture the new
				 * semantics.
				 */
				res = -EEXIST;
				goto free;
			}

			attr = &hash->attrs[min_id];
			memcpy(attr, &attr_defs[0]->attr, sizeof(*attr));

			attr_obj_with_special_access = IS_ATTR_OBJECT(attr) &&
				(attr->obj.access == UVERBS_ACCESS_NEW ||
				 attr->obj.access == UVERBS_ACCESS_DESTROY);
			num_of_singularities += !!attr_obj_with_special_access;
			if (WARN(num_of_singularities > 1,
				 "ib_uverbs: Method contains more than one object attr (%d) with new/destroy access\n",
				 min_id) ||
			    WARN(attr_obj_with_special_access &&
				 !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
				 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
				 min_id) ||
			    WARN(IS_ATTR_OBJECT(attr) &&
				 attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
				 "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n",
				 min_id)) {
				res = -EINVAL;
				goto free;
			}

			if (attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY)
				set_bit(min_id, hash->mandatory_attrs_bitmask);
			min_id++;
		} while (1);
	}

	kfree(attr_defs);
	return method;

free:
	kfree(attr_defs);
free_method:
	free_method(method);
	return ERR_PTR(res);
}

static void free_object(struct uverbs_object_spec *object)
{
	unsigned int i, j;

	if (!object)
		return;

	for (i = 0; i < object->num_buckets; i++) {
		struct uverbs_method_spec_hash *method_buckets =
			object->method_buckets[i];

		if (!method_buckets)
			continue;

		for (j = 0; j < method_buckets->num_methods; j++)
			free_method(method_buckets->methods[j]);

		kfree(method_buckets);
	}

	kfree(object);
}

/*
 * This function gets an array of size @num_object_defs which contains pointers
 * to object definitions @object_defs. The function allocates an
 * uverbs_object_spec structure and initializes its number of buckets and the
 * elements in the buckets to the correct methods. While doing that, it
 * resolves conflicting definitions of the same method.
 */
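
/*
 * Merge example (illustrative): when two trees define a method with the same
 * id and both provide a handler, the handler and flags of the tree given last
 * in the merge win, while the attributes of both definitions are combined by
 * build_method_with_attrs() above.
 */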
static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_object_def **object_defs,
							     size_t num_object_defs)
{
	u16 bucket_idx;
	int max_method_buckets = 0;
	u16 num_method_buckets = 0;
	int res = 0;
	struct uverbs_object_spec *object = NULL;
	const struct uverbs_method_def **method_defs;

	max_method_buckets = find_max_method_ns_id(num_object_defs, object_defs);
	if (max_method_buckets >= 0)
		num_method_buckets = max_method_buckets + 1;

	object = kzalloc(sizeof(*object) +
			 num_method_buckets *
			 sizeof(*object->method_buckets), GFP_KERNEL);
	if (!object)
		return ERR_PTR(-ENOMEM);

	object->num_buckets = num_method_buckets;
	method_defs = kcalloc(num_object_defs, sizeof(*method_defs), GFP_KERNEL);
	if (!method_defs) {
		res = -ENOMEM;
		goto free_object;
	}

	for (bucket_idx = 0; bucket_idx < object->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		int methods_max_bucket = 0;
		struct uverbs_method_spec_hash *hash = NULL;

		methods_max_bucket = find_max_method_id(num_object_defs, object_defs,
							bucket_idx);
		if (methods_max_bucket < 0)
			continue;

		hash = kzalloc(sizeof(*hash) +
			       sizeof(*hash->methods) * (methods_max_bucket + 1),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}

		hash->num_methods = methods_max_bucket + 1;
		object->method_buckets[bucket_idx] = hash;

		do {
			size_t num_method_defs;
			struct uverbs_method_spec *method;
			int i;

			num_method_defs =
				get_methods_above_id(method_defs,
						     num_object_defs,
						     object_defs,
						     bucket_idx,
						     &min_id);
			/* Last method in bucket */
			if (!num_method_defs)
				break;

			method = build_method_with_attrs(method_defs,
							 num_method_defs);
			if (IS_ERR(method)) {
				res = PTR_ERR(method);
				goto free;
			}

			/*
			 * The last tree which is given as an argument to the
			 * merge overrides previous method handlers.
			 * Therefore, we iterate backwards and search for the
			 * first handler which != NULL. This also defines the
			 * set of flags used for this handler.
			 */
			for (i = num_object_defs - 1;
			     i >= 0 && !method_defs[i]->handler; i--)
				;
			hash->methods[min_id++] = method;
			/* NULL handler isn't allowed */
			if (WARN(i < 0,
				 "ib_uverbs: tried to merge function id %d, but all handlers are NULL\n",
				 min_id)) {
				res = -EINVAL;
				goto free;
			}

			method->handler = method_defs[i]->handler;
			method->flags = method_defs[i]->flags;
		} while (1);
	}

	kfree(method_defs);
	return object;

free:
	kfree(method_defs);
free_object:
	free_object(object);
	return ERR_PTR(res);
}

void uverbs_free_spec_tree(struct uverbs_root_spec *root)
{
	unsigned int i, j;

	if (!root)
		return;

	for (i = 0; i < root->num_buckets; i++) {
		struct uverbs_object_spec_hash *object_hash =
			root->object_buckets[i];

		if (!object_hash)
			continue;

		for (j = 0; j < object_hash->num_objects; j++)
			free_object(object_hash->objects[j]);

		kfree(object_hash);
	}

	kfree(root);
}
EXPORT_SYMBOL(uverbs_free_spec_tree);

struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
						const struct uverbs_object_tree_def **trees)
{
	u16 bucket_idx;
	short max_object_buckets = 0;
	size_t num_objects_buckets = 0;
	struct uverbs_root_spec *root_spec = NULL;
	const struct uverbs_object_def **object_defs;
	int i;
	int res = 0;

	max_object_buckets = find_max_object_ns_id(num_trees, trees);
	/*
	 * Devices which don't want to support ib_uverbs should just allocate
	 * an empty parsing tree. Every user-space command won't hit any valid
	 * entry in the parsing tree and thus will fail.
	 */
	if (max_object_buckets >= 0)
		num_objects_buckets = max_object_buckets + 1;

	root_spec = kzalloc(sizeof(*root_spec) +
			    num_objects_buckets * sizeof(*root_spec->object_buckets),
			    GFP_KERNEL);
	if (!root_spec)
		return ERR_PTR(-ENOMEM);
	root_spec->num_buckets = num_objects_buckets;

	object_defs = kcalloc(num_trees, sizeof(*object_defs),
			      GFP_KERNEL);
	if (!object_defs) {
		res = -ENOMEM;
		goto free_root;
	}

	for (bucket_idx = 0; bucket_idx < root_spec->num_buckets; bucket_idx++) {
		short min_id = SHRT_MIN;
		short objects_max_bucket;
		struct uverbs_object_spec_hash *hash = NULL;

		objects_max_bucket = find_max_object_id(num_trees, trees,
							bucket_idx);
		if (objects_max_bucket < 0)
			continue;

		hash = kzalloc(sizeof(*hash) +
			       sizeof(*hash->objects) * (objects_max_bucket + 1),
			       GFP_KERNEL);
		if (!hash) {
			res = -ENOMEM;
			goto free;
		}
		hash->num_objects = objects_max_bucket + 1;
		root_spec->object_buckets[bucket_idx] = hash;

		do {
			size_t num_object_defs;
			struct uverbs_object_spec *object;

			num_object_defs = get_objects_above_id(object_defs,
							       num_trees,
							       trees,
							       bucket_idx,
							       &min_id);
			/* Last object in bucket */
			if (!num_object_defs)
				break;

			object = build_object_with_methods(object_defs,
							   num_object_defs);
			if (IS_ERR(object)) {
				res = PTR_ERR(object);
				goto free;
			}

			/*
			 * The last tree which is given as an argument to the
			 * merge overrides previous objects' type_attrs.
			 * Therefore, we iterate backwards and search for the
			 * first type_attrs which != NULL.
			 */
			for (i = num_object_defs - 1;
			     i >= 0 && !object_defs[i]->type_attrs; i--)
				;
			/*
			 * NULL is a valid type_attrs. It means an object we
			 * can't instantiate (like DEVICE).
			 */
			object->type_attrs = i < 0 ? NULL :
				object_defs[i]->type_attrs;

			hash->objects[min_id++] = object;
		} while (1);
	}

	kfree(object_defs);

	return root_spec;

free:
	kfree(object_defs);
free_root:
	uverbs_free_spec_tree(root_spec);
	return ERR_PTR(res);
}
EXPORT_SYMBOL(uverbs_alloc_spec_tree);
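
/*
 * Usage sketch (illustrative only; the tree definitions below are
 * hypothetical): a consumer merging a common parsing tree with a
 * driver-specific one would do roughly the following at registration time:
 *
 *	const struct uverbs_object_tree_def *trees[] = {
 *		&common_objects_tree,
 *		&driver_specific_tree,
 *	};
 *	struct uverbs_root_spec *spec;
 *
 *	spec = uverbs_alloc_spec_tree(ARRAY_SIZE(trees), trees);
 *	if (IS_ERR(spec))
 *		return PTR_ERR(spec);
 *	...
 *	uverbs_free_spec_tree(spec);
 *
 * Trees later in the array override earlier ones where they define the same
 * method handler or the same object's type_attrs.
 */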