/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/rdma_user_ioctl.h>
#include <rdma/uverbs_ioctl.h>
#include "rdma_core.h"
#include "uverbs.h"

struct bundle_alloc_head {
	struct bundle_alloc_head *next;
	u8 data[];
};

struct bundle_priv {
	/* Must be first */
	struct bundle_alloc_head alloc_head;
	struct bundle_alloc_head *allocated_mem;
	size_t internal_avail;
	size_t internal_used;

	struct radix_tree_root *radix;
	const struct uverbs_api_ioctl_method *method_elm;
	void __rcu **radix_slots;
	unsigned long radix_slots_len;
	u32 method_key;

	struct ib_uverbs_attr __user *user_attrs;
	struct ib_uverbs_attr *uattrs;

	DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);

	/*
	 * Must be last. bundle ends in a flex array which overlaps
	 * internal_buffer.
	 */
	struct uverbs_attr_bundle bundle;
	u64 internal_buffer[32];
};

/*
 * Each method has an absolute minimum amount of memory it needs to allocate,
 * precompute that amount and determine if the onstack memory can be used or
 * if allocation is needed.
 */
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
			      unsigned int num_attrs)
{
	struct bundle_priv *pbundle;
	size_t bundle_size =
		offsetof(struct bundle_priv, internal_buffer) +
		sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
		sizeof(*pbundle->uattrs) * num_attrs;

	method_elm->use_stack = bundle_size <= sizeof(*pbundle);
	method_elm->bundle_size =
		ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));

	/* Do not want order-2 allocations for this. */
	WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
}
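
/*
 * use_stack and bundle_size computed above drive ib_uverbs_cmd_verbs():
 * when the minimum fits in sizeof(struct bundle_priv) the on-stack bundle
 * (with its 32 * sizeof(u64) internal_buffer) is used, otherwise a
 * bundle_size kmalloc() provides the larger internal_avail pool.
 */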

/**
 * uverbs_alloc() - Quickly allocate memory for use with a bundle
 * @bundle: The bundle
 * @size: Number of bytes to allocate
 * @flags: Allocator flags
 *
 * The bundle allocator is intended for allocations that are connected with
 * processing the system call related to the bundle. The allocated memory is
 * always freed once the system call completes, and cannot be freed any other
 * way.
 *
 * This tries to use a small pool of pre-allocated memory for performance.
 */
__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
			     gfp_t flags)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	size_t new_used;
	void *res;

	if (check_add_overflow(size, pbundle->internal_used, &new_used))
		return ERR_PTR(-EOVERFLOW);

	if (new_used > pbundle->internal_avail) {
		struct bundle_alloc_head *buf;

		buf = kvmalloc(struct_size(buf, data, size), flags);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		buf->next = pbundle->allocated_mem;
		pbundle->allocated_mem = buf;
		return buf->data;
	}

	res = (void *)pbundle->internal_buffer + pbundle->internal_used;
	pbundle->internal_used =
		ALIGN(new_used, sizeof(*pbundle->internal_buffer));
	if (want_init_on_alloc(flags))
		memset(res, 0, size);
	return res;
}
EXPORT_SYMBOL(_uverbs_alloc);
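
/*
 * Callers normally reach this through the uverbs_alloc()/uverbs_zalloc()
 * helpers. For example, uverbs_process_idrs_array() below sizes a scratch
 * array off the bundle so it is released automatically in bundle_destroy():
 *
 *	attr->uobjects = uverbs_alloc(&pbundle->bundle,
 *				      array_size(array_len,
 *						 sizeof(*attr->uobjects)));
 */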

static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
				   u16 len)
{
	if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
		return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
					    uattr->len - len);

	return !memchr_inv((const void *)&uattr->data + len,
			   0, uattr->len - len);
}

static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
			     const struct uverbs_attr *attr)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	u16 flags;

	flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
		UVERBS_ATTR_F_VALID_OUTPUT;
	if (put_user(flags,
		     &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
		return -EFAULT;
	return 0;
}

static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
				     const struct uverbs_api_attr *attr_uapi,
				     struct uverbs_objs_arr_attr *attr,
				     struct ib_uverbs_attr *uattr,
				     u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	size_t array_len;
	u32 *idr_vals;
	int ret = 0;
	size_t i;

	if (uattr->attr_data.reserved)
		return -EINVAL;

	if (uattr->len % sizeof(u32))
		return -EINVAL;

	array_len = uattr->len / sizeof(u32);
	if (array_len < spec->u2.objs_arr.min_len ||
	    array_len > spec->u2.objs_arr.max_len)
		return -EINVAL;

	attr->uobjects =
		uverbs_alloc(&pbundle->bundle,
			     array_size(array_len, sizeof(*attr->uobjects)));
	if (IS_ERR(attr->uobjects))
		return PTR_ERR(attr->uobjects);

	/*
	 * Since idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
	 * to store idrs array and avoid additional memory allocation. The
	 * idrs array is offset to the end of the uobjects array so we will be
	 * able to read idr and replace with a pointer.
	 */
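	/*
	 * For example, with array_len = 4 on a 64-bit kernel the uobjects
	 * array spans 32 bytes and the four u32 ids occupy its last 16
	 * bytes; iteration i reads idr_vals[i] before it stores the pointer
	 * into uobjects[i], so an id is never overwritten before it is used.
	 */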
	idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;

	if (uattr->len > sizeof(uattr->data)) {
		ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
				     uattr->len);
		if (ret)
			return -EFAULT;
	} else {
		memcpy(idr_vals, &uattr->data, uattr->len);
	}

	for (i = 0; i != array_len; i++) {
		attr->uobjects[i] = uverbs_get_uobject_from_file(
			spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
			idr_vals[i], &pbundle->bundle);
		if (IS_ERR(attr->uobjects[i])) {
			ret = PTR_ERR(attr->uobjects[i]);
			break;
		}
	}

	attr->len = i;
	__set_bit(attr_bkey, pbundle->spec_finalize);
	return ret;
}

static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
				   struct uverbs_objs_arr_attr *attr,
				   bool commit,
				   struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	size_t i;

	for (i = 0; i != attr->len; i++)
		uverbs_finalize_object(attr->uobjects[i],
				       spec->u2.objs_arr.access, commit, attrs);
}

static int uverbs_process_attr(struct bundle_priv *pbundle,
			       const struct uverbs_api_attr *attr_uapi,
			       struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
	const struct uverbs_attr_spec *val_spec = spec;
	struct uverbs_obj_attr *o_attr;

	switch (spec->type) {
	case UVERBS_ATTR_TYPE_ENUM_IN:
		if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
			return -EOPNOTSUPP;

		if (uattr->attr_data.enum_data.reserved)
			return -EINVAL;

		val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];

		/* Currently we only support PTR_IN based enums */
		if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
			return -EOPNOTSUPP;

		e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
		fallthrough;
	case UVERBS_ATTR_TYPE_PTR_IN:
		/* Ensure that any data provided by userspace beyond the known
		 * struct is zero. Userspace that knows how to use some future
		 * longer struct will fail here if used with an old kernel and
		 * non-zero content, making ABI compat/discovery simpler.
		 */
		if (uattr->len > val_spec->u.ptr.len &&
		    val_spec->zero_trailing &&
		    !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
			return -EOPNOTSUPP;

		fallthrough;
	case UVERBS_ATTR_TYPE_PTR_OUT:
		if (uattr->len < val_spec->u.ptr.min_len ||
		    (!val_spec->zero_trailing &&
		     uattr->len > val_spec->u.ptr.len))
			return -EINVAL;

		if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
		    uattr->attr_data.reserved)
			return -EINVAL;

		e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
		e->ptr_attr.len = uattr->len;

		if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
			void *p;

			p = uverbs_alloc(&pbundle->bundle, uattr->len);
			if (IS_ERR(p))
				return PTR_ERR(p);

			e->ptr_attr.ptr = p;

			if (copy_from_user(p, u64_to_user_ptr(uattr->data),
					   uattr->len))
				return -EFAULT;
		} else {
			e->ptr_attr.data = uattr->data;
		}
		break;

	case UVERBS_ATTR_TYPE_IDR:
	case UVERBS_ATTR_TYPE_FD:
		if (uattr->attr_data.reserved)
			return -EINVAL;

		if (uattr->len != 0)
			return -EINVAL;

		o_attr = &e->obj_attr;
		o_attr->attr_elm = attr_uapi;

		/*
		 * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
		 * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
		 * here without caring about truncation as we know that the
		 * IDR implementation today rejects negative IDs
		 */
		o_attr->uobject = uverbs_get_uobject_from_file(
			spec->u.obj.obj_type, spec->u.obj.access,
			uattr->data_s64, &pbundle->bundle);
		if (IS_ERR(o_attr->uobject))
			return PTR_ERR(o_attr->uobject);
		__set_bit(attr_bkey, pbundle->uobj_finalize);

		if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
			unsigned int uattr_idx = uattr - pbundle->uattrs;
			s64 id = o_attr->uobject->id;

			/* Copy the allocated id to the user-space */
			if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
				return -EFAULT;
		}
		break;

	case UVERBS_ATTR_TYPE_IDRS_ARRAY:
		return uverbs_process_idrs_array(pbundle, attr_uapi,
						 &e->objs_arr_attr, uattr,
						 attr_bkey);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * We search the radix tree with the method prefix and now we want to fast
 * search the suffix bits to get a particular attribute pointer. It is not
 * totally clear to me if this breaks the radix tree encapsulation or not,
 * but it uses the iter data to determine if the method iter points at the
 * same chunk that will store the attribute; if so, it just derefs it
 * directly. By construction in most kernel configs the method and attrs
 * will all fit in a single radix chunk, so in most cases this will have no
 * search. Otherwise this falls back to a full search.
 */
static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
					     u32 attr_key)
{
	void __rcu **slot;

	if (likely(attr_key < pbundle->radix_slots_len)) {
		void *entry;

		slot = pbundle->radix_slots + attr_key;
		entry = rcu_dereference_raw(*slot);
		if (likely(!radix_tree_is_internal_node(entry) && entry))
			return slot;
	}

	return radix_tree_lookup_slot(pbundle->radix,
				      pbundle->method_key | attr_key);
}

static int uverbs_set_attr(struct bundle_priv *pbundle,
			   struct ib_uverbs_attr *uattr)
{
	u32 attr_key = uapi_key_attr(uattr->attr_id);
	u32 attr_bkey = uapi_bkey_attr(attr_key);
	const struct uverbs_api_attr *attr;
	void __rcu **slot;
	int ret;

	slot = uapi_get_attr_for_method(pbundle, attr_key);
	if (!slot) {
		/*
		 * Kernel does not support the attribute but user-space says it
		 * is mandatory
		 */
		if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
			return -EPROTONOSUPPORT;
		return 0;
	}
	attr = rcu_dereference_protected(*slot, true);

	/* Reject duplicate attributes from user-space */
	if (test_bit(attr_bkey, pbundle->bundle.attr_present))
		return -EINVAL;

	ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
	if (ret)
		return ret;

	__set_bit(attr_bkey, pbundle->bundle.attr_present);
	return 0;
}

static int ib_uverbs_run_method(struct bundle_priv *pbundle,
				unsigned int num_attrs)
{
	int (*handler)(struct uverbs_attr_bundle *attrs);
	size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
	unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
	unsigned int i;
	int ret;

	/* See uverbs_disassociate_api() */
	handler = srcu_dereference(
		pbundle->method_elm->handler,
		&pbundle->bundle.ufile->device->disassociate_srcu);
	if (!handler)
		return -EIO;

	pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
	if (IS_ERR(pbundle->uattrs))
		return PTR_ERR(pbundle->uattrs);
	if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
		return -EFAULT;

	for (i = 0; i != num_attrs; i++) {
		ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
		if (unlikely(ret))
			return ret;
	}

	/* User space did not provide all the mandatory attributes */
	if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
				    pbundle->bundle.attr_present,
				    pbundle->method_elm->key_bitmap_len)))
		return -EINVAL;

	if (pbundle->method_elm->has_udata)
		uverbs_fill_udata(&pbundle->bundle,
				  &pbundle->bundle.driver_udata,
				  UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
	else
		pbundle->bundle.driver_udata = (struct ib_udata){};

	if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
		struct uverbs_obj_attr *destroy_attr =
			&pbundle->bundle.attrs[destroy_bkey].obj_attr;

		ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
		if (ret)
			return ret;
		__clear_bit(destroy_bkey, pbundle->uobj_finalize);

		ret = handler(&pbundle->bundle);
		uobj_put_destroy(destroy_attr->uobject);
	} else {
		ret = handler(&pbundle->bundle);
	}

	/*
	 * Until the drivers are revised to use the bundle directly we have to
	 * assume that the driver wrote to its UHW_OUT and flag userspace
	 * appropriately.
	 */
	if (!ret && pbundle->method_elm->has_udata) {
		const struct uverbs_attr *attr =
			uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);

		if (!IS_ERR(attr))
			ret = uverbs_set_output(&pbundle->bundle, attr);
	}

	/*
	 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
	 * not invoke the method because the request is not supported. No
	 * other cases should return this code.
	 */
	if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
		return -EINVAL;

	return ret;
}

static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
	unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
	struct bundle_alloc_head *memblock;
	unsigned int i;

	/* fast path for simple uobjects */
	i = -1;
	while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];

		uverbs_finalize_object(
			attr->obj_attr.uobject,
			attr->obj_attr.attr_elm->spec.u.obj.access, commit,
			&pbundle->bundle);
	}

	i = -1;
	while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
		const struct uverbs_api_attr *attr_uapi;
		void __rcu **slot;

		slot = uapi_get_attr_for_method(
			pbundle,
			pbundle->method_key | uapi_bkey_to_key_attr(i));
		if (WARN_ON(!slot))
			continue;

		attr_uapi = rcu_dereference_protected(*slot, true);

		if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
			uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
					       commit, &pbundle->bundle);
		}
	}

	for (memblock = pbundle->allocated_mem; memblock;) {
		struct bundle_alloc_head *tmp = memblock;

		memblock = memblock->next;
		kvfree(tmp);
	}
}

static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
			       struct ib_uverbs_ioctl_hdr *hdr,
			       struct ib_uverbs_attr __user *user_attrs)
{
	const struct uverbs_api_ioctl_method *method_elm;
	struct uverbs_api *uapi = ufile->device->uapi;
	struct radix_tree_iter attrs_iter;
	struct bundle_priv *pbundle;
	struct bundle_priv onstack;
	void __rcu **slot;
	int ret;

	if (unlikely(hdr->driver_id != uapi->driver_id))
		return -EINVAL;

	slot = radix_tree_iter_lookup(
		&uapi->radix, &attrs_iter,
		uapi_key_obj(hdr->object_id) |
			uapi_key_ioctl_method(hdr->method_id));
	if (unlikely(!slot))
		return -EPROTONOSUPPORT;
	method_elm = rcu_dereference_protected(*slot, true);

	if (!method_elm->use_stack) {
		pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
		if (!pbundle)
			return -ENOMEM;
		pbundle->internal_avail =
			method_elm->bundle_size -
			offsetof(struct bundle_priv, internal_buffer);
		pbundle->alloc_head.next = NULL;
		pbundle->allocated_mem = &pbundle->alloc_head;
	} else {
		pbundle = &onstack;
		pbundle->internal_avail = sizeof(pbundle->internal_buffer);
		pbundle->allocated_mem = NULL;
	}

	/* Space for the pbundle->bundle.attrs flex array */
	pbundle->method_elm = method_elm;
	pbundle->method_key = attrs_iter.index;
	pbundle->bundle.ufile = ufile;
	pbundle->bundle.context = NULL; /* only valid if bundle has uobject */
	pbundle->radix = &uapi->radix;
	pbundle->radix_slots = slot;
	pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
	pbundle->user_attrs = user_attrs;

	pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
					       sizeof(*pbundle->bundle.attrs),
				       sizeof(*pbundle->internal_buffer));
	memset(pbundle->bundle.attr_present, 0,
	       sizeof(pbundle->bundle.attr_present));
	memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
	memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));

	ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
	bundle_destroy(pbundle, ret == 0);
	return ret;
}

long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_ioctl_hdr __user *user_hdr =
		(struct ib_uverbs_ioctl_hdr __user *)arg;
	struct ib_uverbs_ioctl_hdr hdr;
	int srcu_key;
	int err;

	if (unlikely(cmd != RDMA_VERBS_IOCTL))
		return -ENOIOCTLCMD;

	err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
	if (err)
		return -EFAULT;

	if (hdr.length > PAGE_SIZE ||
	    hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
		return -EINVAL;

	if (hdr.reserved1 || hdr.reserved2)
		return -EPROTONOSUPPORT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return err;
}
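
/*
 * Note on the wire format checked above: userspace passes a struct
 * ib_uverbs_ioctl_hdr (object_id, method_id, num_attrs, driver_id and
 * reserved fields) immediately followed by num_attrs struct ib_uverbs_attr
 * entries; hdr.length must equal struct_size(&hdr, attrs, hdr.num_attrs)
 * and fit within PAGE_SIZE.
 */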

int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	const struct uverbs_attr *attr;
	u64 flags;

	attr = uverbs_attr_get(attrs_bundle, idx);
	/* Missing attribute means 0 flags */
	if (IS_ERR(attr)) {
		*to = 0;
		return 0;
	}

	/*
	 * New userspace code should use 8 bytes to pass flags, but we
	 * transparently support old userspaces that were using 4 bytes as
	 * well.
	 */
	if (attr->ptr_attr.len == 8)
		flags = attr->ptr_attr.data;
	else if (attr->ptr_attr.len == 4)
		flags = *(u32 *)&attr->ptr_attr.data;
	else
		return -EINVAL;

	if (flags & ~allowed_bits)
		return -EINVAL;

	*to = flags;
	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags64);

int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
		       size_t idx, u64 allowed_bits)
{
	u64 flags;
	int ret;

	ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
	if (ret)
		return ret;

	if (flags > U32_MAX)
		return -EINVAL;
	*to = flags;

	return 0;
}
EXPORT_SYMBOL(uverbs_get_flags32);
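
/*
 * Example (hypothetical method handler): flags attributes are parsed with
 * the helpers above; MY_METHOD, MY_METHOD_FLAGS and MY_FLAG_* are made-up
 * names for illustration only:
 *
 *	static int UVERBS_HANDLER(MY_METHOD)(struct uverbs_attr_bundle *attrs)
 *	{
 *		u32 flags;
 *		int ret;
 *
 *		ret = uverbs_get_flags32(&flags, attrs, MY_METHOD_FLAGS,
 *					 MY_FLAG_A | MY_FLAG_B);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */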

/*
 * Fill a ib_udata struct (core or uhw) using the given attribute IDs.
 * This is primarily used to convert the UVERBS_ATTR_UHW() into the
 * ib_udata format used by the drivers.
 */
void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
		       struct ib_udata *udata, unsigned int attr_in,
		       unsigned int attr_out)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	const struct uverbs_attr *in =
		uverbs_attr_get(&pbundle->bundle, attr_in);
	const struct uverbs_attr *out =
		uverbs_attr_get(&pbundle->bundle, attr_out);

	if (!IS_ERR(in)) {
		udata->inlen = in->ptr_attr.len;
		if (uverbs_attr_ptr_is_inline(in))
			udata->inbuf =
				&pbundle->user_attrs[in->ptr_attr.uattr_idx]
					 .data;
		else
			udata->inbuf = u64_to_user_ptr(in->ptr_attr.data);
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}

	if (!IS_ERR(out)) {
		udata->outbuf = u64_to_user_ptr(out->ptr_attr.data);
		udata->outlen = out->ptr_attr.len;
	} else {
		udata->outbuf = NULL;
		udata->outlen = 0;
	}
}

int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
		   const void *from, size_t size)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
	size_t min_size;

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	min_size = min_t(size_t, attr->ptr_attr.len, size);
	if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
		return -EFAULT;

	return uverbs_set_output(bundle, attr);
}
EXPORT_SYMBOL(uverbs_copy_to);
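
/*
 * Example (hypothetical handler): copying a response struct into a PTR_OUT
 * attribute; MY_METHOD_RESP and struct my_resp are made-up names:
 *
 *	struct my_resp resp = { .value = val };
 *
 *	return uverbs_copy_to(attrs, MY_METHOD_RESP, &resp, sizeof(resp));
 *
 * uverbs_copy_to() truncates to the attribute length supplied by userspace
 * and marks the attribute as valid output via uverbs_set_output().
 */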

/*
 * This is only used if the caller has directly used copy_to_user to write the
 * data. It signals to user space that the buffer is filled in.
 */
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	return uverbs_set_output(bundle, attr);
}

int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
		      size_t idx, s64 lower_bound, u64 upper_bound,
		      s64 *def_val)
{
	const struct uverbs_attr *attr;

	attr = uverbs_attr_get(attrs_bundle, idx);
	if (IS_ERR(attr)) {
		if ((PTR_ERR(attr) != -ENOENT) || !def_val)
			return PTR_ERR(attr);

		*to = *def_val;
	} else {
		*to = attr->ptr_attr.data;
	}

	if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(_uverbs_get_const);
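
/*
 * Drivers normally reach this through the uverbs_get_const() and
 * uverbs_get_const_default() wrappers in <rdma/uverbs_ioctl.h> rather than
 * calling it directly, so the bounds are derived from the destination type.
 */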

int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
				  size_t idx, const void *from, size_t size)
{
	const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);

	if (IS_ERR(attr))
		return PTR_ERR(attr);

	if (size < attr->ptr_attr.len) {
		if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
			       attr->ptr_attr.len - size))
			return -EFAULT;
	}
	return uverbs_copy_to(bundle, idx, from, size);
}