/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"
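
/*
 * Look up the completion event channel behind a user-supplied fd and return
 * its ib_uverbs_completion_event_file.  A reference on the underlying
 * uobject is taken for the caller.
 */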
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	struct ib_rdmacg_object		  cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;
#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid     = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid  = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid     = ib_lid_cpu16(attr.lid);
		resp.sm_lid  = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
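
/*
 * XRC domains opened through a file descriptor are tracked in a per-device
 * red-black tree keyed by the backing inode, so that every open of the same
 * file shares a single ib_xrcd.
 */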
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
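
/*
 * REG_MR: the user virtual range (start, length) is registered through the
 * driver's reg_user_mr() hook; start and hca_va must share the same page
 * offset, and on-demand-paging access is only allowed when the device
 * advertises IB_DEVICE_ON_DEMAND_PAGING.
 */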
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd		      *old_pd;
	int                            ret;
	struct ib_uobject	      *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject	 *uobj;
	int                       ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
835 ssize_t
ib_uverbs_alloc_mw(struct ib_uverbs_file
*file
,
836 struct ib_device
*ib_dev
,
837 const char __user
*buf
, int in_len
,
840 struct ib_uverbs_alloc_mw cmd
;
841 struct ib_uverbs_alloc_mw_resp resp
;
842 struct ib_uobject
*uobj
;
845 struct ib_udata udata
;
848 if (out_len
< sizeof(resp
))
851 if (copy_from_user(&cmd
, buf
, sizeof(cmd
)))
854 uobj
= uobj_alloc(uobj_get_type(mw
), file
->ucontext
);
856 return PTR_ERR(uobj
);
858 pd
= uobj_get_obj_read(pd
, cmd
.pd_handle
, file
->ucontext
);
864 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
),
865 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
866 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
867 out_len
- sizeof(resp
));
869 mw
= pd
->device
->alloc_mw(pd
, cmd
.mw_type
, &udata
);
875 mw
->device
= pd
->device
;
878 atomic_inc(&pd
->usecnt
);
882 memset(&resp
, 0, sizeof(resp
));
883 resp
.rkey
= mw
->rkey
;
884 resp
.mw_handle
= uobj
->id
;
886 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof(resp
))) {
891 uobj_put_obj_read(pd
);
892 uobj_alloc_commit(uobj
);
897 uverbs_dealloc_mw(mw
);
899 uobj_put_obj_read(pd
);
901 uobj_alloc_abort(uobj
);
905 ssize_t
ib_uverbs_dealloc_mw(struct ib_uverbs_file
*file
,
906 struct ib_device
*ib_dev
,
907 const char __user
*buf
, int in_len
,
910 struct ib_uverbs_dealloc_mw cmd
;
911 struct ib_uobject
*uobj
;
914 if (copy_from_user(&cmd
, buf
, sizeof(cmd
)))
917 uobj
= uobj_get_write(uobj_get_type(mw
), cmd
.mw_handle
,
920 return PTR_ERR(uobj
);
922 ret
= uobj_remove_commit(uobj
);
923 return ret
?: in_len
;
926 ssize_t
ib_uverbs_create_comp_channel(struct ib_uverbs_file
*file
,
927 struct ib_device
*ib_dev
,
928 const char __user
*buf
, int in_len
,
931 struct ib_uverbs_create_comp_channel cmd
;
932 struct ib_uverbs_create_comp_channel_resp resp
;
933 struct ib_uobject
*uobj
;
934 struct ib_uverbs_completion_event_file
*ev_file
;
936 if (out_len
< sizeof resp
)
939 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
942 uobj
= uobj_alloc(uobj_get_type(comp_channel
), file
->ucontext
);
944 return PTR_ERR(uobj
);
948 ev_file
= container_of(uobj
, struct ib_uverbs_completion_event_file
,
950 ib_uverbs_init_event_queue(&ev_file
->ev_queue
);
952 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
)) {
953 uobj_alloc_abort(uobj
);
957 uobj_alloc_commit(uobj
);
961 static struct ib_ucq_object
*create_cq(struct ib_uverbs_file
*file
,
962 struct ib_device
*ib_dev
,
963 struct ib_udata
*ucore
,
964 struct ib_udata
*uhw
,
965 struct ib_uverbs_ex_create_cq
*cmd
,
967 int (*cb
)(struct ib_uverbs_file
*file
,
968 struct ib_ucq_object
*obj
,
969 struct ib_uverbs_ex_create_cq_resp
*resp
,
970 struct ib_udata
*udata
,
974 struct ib_ucq_object
*obj
;
975 struct ib_uverbs_completion_event_file
*ev_file
= NULL
;
978 struct ib_uverbs_ex_create_cq_resp resp
;
979 struct ib_cq_init_attr attr
= {};
981 if (!ib_dev
->create_cq
)
982 return ERR_PTR(-EOPNOTSUPP
);
984 if (cmd
->comp_vector
>= file
->device
->num_comp_vectors
)
985 return ERR_PTR(-EINVAL
);
987 obj
= (struct ib_ucq_object
*)uobj_alloc(uobj_get_type(cq
),
992 if (cmd
->comp_channel
>= 0) {
993 ev_file
= ib_uverbs_lookup_comp_file(cmd
->comp_channel
,
995 if (IS_ERR(ev_file
)) {
996 ret
= PTR_ERR(ev_file
);
1001 obj
->uobject
.user_handle
= cmd
->user_handle
;
1002 obj
->uverbs_file
= file
;
1003 obj
->comp_events_reported
= 0;
1004 obj
->async_events_reported
= 0;
1005 INIT_LIST_HEAD(&obj
->comp_list
);
1006 INIT_LIST_HEAD(&obj
->async_list
);
1008 attr
.cqe
= cmd
->cqe
;
1009 attr
.comp_vector
= cmd
->comp_vector
;
1011 if (cmd_sz
> offsetof(typeof(*cmd
), flags
) + sizeof(cmd
->flags
))
1012 attr
.flags
= cmd
->flags
;
1014 cq
= ib_dev
->create_cq(ib_dev
, &attr
, file
->ucontext
, uhw
);
1020 cq
->device
= ib_dev
;
1021 cq
->uobject
= &obj
->uobject
;
1022 cq
->comp_handler
= ib_uverbs_comp_handler
;
1023 cq
->event_handler
= ib_uverbs_cq_event_handler
;
1024 cq
->cq_context
= ev_file
? &ev_file
->ev_queue
: NULL
;
1025 atomic_set(&cq
->usecnt
, 0);
1027 obj
->uobject
.object
= cq
;
1028 memset(&resp
, 0, sizeof resp
);
1029 resp
.base
.cq_handle
= obj
->uobject
.id
;
1030 resp
.base
.cqe
= cq
->cqe
;
1032 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
1033 sizeof(resp
.response_length
);
1035 cq
->res
.type
= RDMA_RESTRACK_CQ
;
1036 rdma_restrack_add(&cq
->res
);
1038 ret
= cb(file
, obj
, &resp
, ucore
, context
);
1042 uobj_alloc_commit(&obj
->uobject
);
1050 ib_uverbs_release_ucq(file
, ev_file
, obj
);
1053 uobj_alloc_abort(&obj
->uobject
);
1055 return ERR_PTR(ret
);
1058 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file
*file
,
1059 struct ib_ucq_object
*obj
,
1060 struct ib_uverbs_ex_create_cq_resp
*resp
,
1061 struct ib_udata
*ucore
, void *context
)
1063 if (ib_copy_to_udata(ucore
, &resp
->base
, sizeof(resp
->base
)))
1069 ssize_t
ib_uverbs_create_cq(struct ib_uverbs_file
*file
,
1070 struct ib_device
*ib_dev
,
1071 const char __user
*buf
, int in_len
,
1074 struct ib_uverbs_create_cq cmd
;
1075 struct ib_uverbs_ex_create_cq cmd_ex
;
1076 struct ib_uverbs_create_cq_resp resp
;
1077 struct ib_udata ucore
;
1078 struct ib_udata uhw
;
1079 struct ib_ucq_object
*obj
;
1081 if (out_len
< sizeof(resp
))
1084 if (copy_from_user(&cmd
, buf
, sizeof(cmd
)))
1087 ib_uverbs_init_udata(&ucore
, buf
, u64_to_user_ptr(cmd
.response
),
1088 sizeof(cmd
), sizeof(resp
));
1090 ib_uverbs_init_udata(&uhw
, buf
+ sizeof(cmd
),
1091 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
1092 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
1093 out_len
- sizeof(resp
));
1095 memset(&cmd_ex
, 0, sizeof(cmd_ex
));
1096 cmd_ex
.user_handle
= cmd
.user_handle
;
1097 cmd_ex
.cqe
= cmd
.cqe
;
1098 cmd_ex
.comp_vector
= cmd
.comp_vector
;
1099 cmd_ex
.comp_channel
= cmd
.comp_channel
;
1101 obj
= create_cq(file
, ib_dev
, &ucore
, &uhw
, &cmd_ex
,
1102 offsetof(typeof(cmd_ex
), comp_channel
) +
1103 sizeof(cmd
.comp_channel
), ib_uverbs_create_cq_cb
,
1107 return PTR_ERR(obj
);
1112 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file
*file
,
1113 struct ib_ucq_object
*obj
,
1114 struct ib_uverbs_ex_create_cq_resp
*resp
,
1115 struct ib_udata
*ucore
, void *context
)
1117 if (ib_copy_to_udata(ucore
, resp
, resp
->response_length
))
1123 int ib_uverbs_ex_create_cq(struct ib_uverbs_file
*file
,
1124 struct ib_device
*ib_dev
,
1125 struct ib_udata
*ucore
,
1126 struct ib_udata
*uhw
)
1128 struct ib_uverbs_ex_create_cq_resp resp
;
1129 struct ib_uverbs_ex_create_cq cmd
;
1130 struct ib_ucq_object
*obj
;
1133 if (ucore
->inlen
< sizeof(cmd
))
1136 err
= ib_copy_from_udata(&cmd
, ucore
, sizeof(cmd
));
1146 if (ucore
->outlen
< (offsetof(typeof(resp
), response_length
) +
1147 sizeof(resp
.response_length
)))
1150 obj
= create_cq(file
, ib_dev
, ucore
, uhw
, &cmd
,
1151 min(ucore
->inlen
, sizeof(cmd
)),
1152 ib_uverbs_ex_create_cq_cb
, NULL
);
1154 return PTR_ERR_OR_ZERO(obj
);
1157 ssize_t
ib_uverbs_resize_cq(struct ib_uverbs_file
*file
,
1158 struct ib_device
*ib_dev
,
1159 const char __user
*buf
, int in_len
,
1162 struct ib_uverbs_resize_cq cmd
;
1163 struct ib_uverbs_resize_cq_resp resp
= {};
1164 struct ib_udata udata
;
1168 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1171 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
),
1172 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
1173 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
1174 out_len
- sizeof(resp
));
1176 cq
= uobj_get_obj_read(cq
, cmd
.cq_handle
, file
->ucontext
);
1180 ret
= cq
->device
->resize_cq(cq
, cmd
.cqe
, &udata
);
1186 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
.cqe
))
1190 uobj_put_obj_read(cq
);
1192 return ret
? ret
: in_len
;
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid	= OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid	= ib_lid_cpu16(wc->slid);
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
1225 ssize_t
ib_uverbs_poll_cq(struct ib_uverbs_file
*file
,
1226 struct ib_device
*ib_dev
,
1227 const char __user
*buf
, int in_len
,
1230 struct ib_uverbs_poll_cq cmd
;
1231 struct ib_uverbs_poll_cq_resp resp
;
1232 u8 __user
*header_ptr
;
1233 u8 __user
*data_ptr
;
1238 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1241 cq
= uobj_get_obj_read(cq
, cmd
.cq_handle
, file
->ucontext
);
1245 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1246 header_ptr
= u64_to_user_ptr(cmd
.response
);
1247 data_ptr
= header_ptr
+ sizeof resp
;
1249 memset(&resp
, 0, sizeof resp
);
1250 while (resp
.count
< cmd
.ne
) {
1251 ret
= ib_poll_cq(cq
, 1, &wc
);
1257 ret
= copy_wc_to_user(ib_dev
, data_ptr
, &wc
);
1261 data_ptr
+= sizeof(struct ib_uverbs_wc
);
1265 if (copy_to_user(header_ptr
, &resp
, sizeof resp
)) {
1273 uobj_put_obj_read(cq
);
1277 ssize_t
ib_uverbs_req_notify_cq(struct ib_uverbs_file
*file
,
1278 struct ib_device
*ib_dev
,
1279 const char __user
*buf
, int in_len
,
1282 struct ib_uverbs_req_notify_cq cmd
;
1285 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1288 cq
= uobj_get_obj_read(cq
, cmd
.cq_handle
, file
->ucontext
);
1292 ib_req_notify_cq(cq
, cmd
.solicited_only
?
1293 IB_CQ_SOLICITED
: IB_CQ_NEXT_COMP
);
1295 uobj_put_obj_read(cq
);
1300 ssize_t
ib_uverbs_destroy_cq(struct ib_uverbs_file
*file
,
1301 struct ib_device
*ib_dev
,
1302 const char __user
*buf
, int in_len
,
1305 struct ib_uverbs_destroy_cq cmd
;
1306 struct ib_uverbs_destroy_cq_resp resp
;
1307 struct ib_uobject
*uobj
;
1309 struct ib_ucq_object
*obj
;
1312 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1315 uobj
= uobj_get_write(uobj_get_type(cq
), cmd
.cq_handle
,
1318 return PTR_ERR(uobj
);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
1324 uverbs_uobject_get(uobj
);
1326 obj
= container_of(cq
->uobject
, struct ib_ucq_object
, uobject
);
1328 memset(&resp
, 0, sizeof(resp
));
1330 ret
= uobj_remove_commit(uobj
);
1332 uverbs_uobject_put(uobj
);
1336 resp
.comp_events_reported
= obj
->comp_events_reported
;
1337 resp
.async_events_reported
= obj
->async_events_reported
;
1339 uverbs_uobject_put(uobj
);
1340 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
1346 static int create_qp(struct ib_uverbs_file
*file
,
1347 struct ib_udata
*ucore
,
1348 struct ib_udata
*uhw
,
1349 struct ib_uverbs_ex_create_qp
*cmd
,
1351 int (*cb
)(struct ib_uverbs_file
*file
,
1352 struct ib_uverbs_ex_create_qp_resp
*resp
,
1353 struct ib_udata
*udata
),
1356 struct ib_uqp_object
*obj
;
1357 struct ib_device
*device
;
1358 struct ib_pd
*pd
= NULL
;
1359 struct ib_xrcd
*xrcd
= NULL
;
1360 struct ib_uobject
*xrcd_uobj
= ERR_PTR(-ENOENT
);
1361 struct ib_cq
*scq
= NULL
, *rcq
= NULL
;
1362 struct ib_srq
*srq
= NULL
;
1365 struct ib_qp_init_attr attr
= {};
1366 struct ib_uverbs_ex_create_qp_resp resp
;
1368 struct ib_rwq_ind_table
*ind_tbl
= NULL
;
1371 if (cmd
->qp_type
== IB_QPT_RAW_PACKET
&& !capable(CAP_NET_RAW
))
1374 obj
= (struct ib_uqp_object
*)uobj_alloc(uobj_get_type(qp
),
1377 return PTR_ERR(obj
);
1379 obj
->uevent
.uobject
.user_handle
= cmd
->user_handle
;
1380 mutex_init(&obj
->mcast_lock
);
1382 if (cmd_sz
>= offsetof(typeof(*cmd
), rwq_ind_tbl_handle
) +
1383 sizeof(cmd
->rwq_ind_tbl_handle
) &&
1384 (cmd
->comp_mask
& IB_UVERBS_CREATE_QP_MASK_IND_TABLE
)) {
1385 ind_tbl
= uobj_get_obj_read(rwq_ind_table
,
1386 cmd
->rwq_ind_tbl_handle
,
1393 attr
.rwq_ind_tbl
= ind_tbl
;
1396 if (cmd_sz
> sizeof(*cmd
) &&
1397 !ib_is_udata_cleared(ucore
, sizeof(*cmd
),
1398 cmd_sz
- sizeof(*cmd
))) {
1403 if (ind_tbl
&& (cmd
->max_recv_wr
|| cmd
->max_recv_sge
|| cmd
->is_srq
)) {
1408 if (ind_tbl
&& !cmd
->max_send_wr
)
1411 if (cmd
->qp_type
== IB_QPT_XRC_TGT
) {
1412 xrcd_uobj
= uobj_get_read(uobj_get_type(xrcd
), cmd
->pd_handle
,
1415 if (IS_ERR(xrcd_uobj
)) {
1420 xrcd
= (struct ib_xrcd
*)xrcd_uobj
->object
;
1425 device
= xrcd
->device
;
1427 if (cmd
->qp_type
== IB_QPT_XRC_INI
) {
1428 cmd
->max_recv_wr
= 0;
1429 cmd
->max_recv_sge
= 0;
1432 srq
= uobj_get_obj_read(srq
, cmd
->srq_handle
,
1434 if (!srq
|| srq
->srq_type
== IB_SRQT_XRC
) {
1441 if (cmd
->recv_cq_handle
!= cmd
->send_cq_handle
) {
1442 rcq
= uobj_get_obj_read(cq
, cmd
->recv_cq_handle
,
1453 scq
= uobj_get_obj_read(cq
, cmd
->send_cq_handle
,
1457 pd
= uobj_get_obj_read(pd
, cmd
->pd_handle
, file
->ucontext
);
1458 if (!pd
|| (!scq
&& has_sq
)) {
1463 device
= pd
->device
;
1466 attr
.event_handler
= ib_uverbs_qp_event_handler
;
1467 attr
.qp_context
= file
;
1472 attr
.sq_sig_type
= cmd
->sq_sig_all
? IB_SIGNAL_ALL_WR
:
1474 attr
.qp_type
= cmd
->qp_type
;
1475 attr
.create_flags
= 0;
1477 attr
.cap
.max_send_wr
= cmd
->max_send_wr
;
1478 attr
.cap
.max_recv_wr
= cmd
->max_recv_wr
;
1479 attr
.cap
.max_send_sge
= cmd
->max_send_sge
;
1480 attr
.cap
.max_recv_sge
= cmd
->max_recv_sge
;
1481 attr
.cap
.max_inline_data
= cmd
->max_inline_data
;
1483 obj
->uevent
.events_reported
= 0;
1484 INIT_LIST_HEAD(&obj
->uevent
.event_list
);
1485 INIT_LIST_HEAD(&obj
->mcast_list
);
1487 if (cmd_sz
>= offsetof(typeof(*cmd
), create_flags
) +
1488 sizeof(cmd
->create_flags
))
1489 attr
.create_flags
= cmd
->create_flags
;
1491 if (attr
.create_flags
& ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
|
1492 IB_QP_CREATE_CROSS_CHANNEL
|
1493 IB_QP_CREATE_MANAGED_SEND
|
1494 IB_QP_CREATE_MANAGED_RECV
|
1495 IB_QP_CREATE_SCATTER_FCS
|
1496 IB_QP_CREATE_CVLAN_STRIPPING
|
1497 IB_QP_CREATE_SOURCE_QPN
|
1498 IB_QP_CREATE_PCI_WRITE_END_PADDING
)) {
1503 if (attr
.create_flags
& IB_QP_CREATE_SOURCE_QPN
) {
1504 if (!capable(CAP_NET_RAW
)) {
1509 attr
.source_qpn
= cmd
->source_qpn
;
1512 buf
= (void *)cmd
+ sizeof(*cmd
);
1513 if (cmd_sz
> sizeof(*cmd
))
1514 if (!(buf
[0] == 0 && !memcmp(buf
, buf
+ 1,
1515 cmd_sz
- sizeof(*cmd
) - 1))) {
1520 if (cmd
->qp_type
== IB_QPT_XRC_TGT
)
1521 qp
= ib_create_qp(pd
, &attr
);
1523 qp
= _ib_create_qp(device
, pd
, &attr
, uhw
,
1524 &obj
->uevent
.uobject
);
1531 if (cmd
->qp_type
!= IB_QPT_XRC_TGT
) {
1532 ret
= ib_create_qp_security(qp
, device
);
1538 qp
->send_cq
= attr
.send_cq
;
1539 qp
->recv_cq
= attr
.recv_cq
;
1541 qp
->rwq_ind_tbl
= ind_tbl
;
1542 qp
->event_handler
= attr
.event_handler
;
1543 qp
->qp_context
= attr
.qp_context
;
1544 qp
->qp_type
= attr
.qp_type
;
1545 atomic_set(&qp
->usecnt
, 0);
1546 atomic_inc(&pd
->usecnt
);
1549 atomic_inc(&attr
.send_cq
->usecnt
);
1551 atomic_inc(&attr
.recv_cq
->usecnt
);
1553 atomic_inc(&attr
.srq
->usecnt
);
1555 atomic_inc(&ind_tbl
->usecnt
);
1557 /* It is done in _ib_create_qp for other QP types */
1558 qp
->uobject
= &obj
->uevent
.uobject
;
1561 obj
->uevent
.uobject
.object
= qp
;
1563 memset(&resp
, 0, sizeof resp
);
1564 resp
.base
.qpn
= qp
->qp_num
;
1565 resp
.base
.qp_handle
= obj
->uevent
.uobject
.id
;
1566 resp
.base
.max_recv_sge
= attr
.cap
.max_recv_sge
;
1567 resp
.base
.max_send_sge
= attr
.cap
.max_send_sge
;
1568 resp
.base
.max_recv_wr
= attr
.cap
.max_recv_wr
;
1569 resp
.base
.max_send_wr
= attr
.cap
.max_send_wr
;
1570 resp
.base
.max_inline_data
= attr
.cap
.max_inline_data
;
1572 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
1573 sizeof(resp
.response_length
);
1575 ret
= cb(file
, &resp
, ucore
);
1580 obj
->uxrcd
= container_of(xrcd_uobj
, struct ib_uxrcd_object
,
1582 atomic_inc(&obj
->uxrcd
->refcnt
);
1583 uobj_put_read(xrcd_uobj
);
1587 uobj_put_obj_read(pd
);
1589 uobj_put_obj_read(scq
);
1590 if (rcq
&& rcq
!= scq
)
1591 uobj_put_obj_read(rcq
);
1593 uobj_put_obj_read(srq
);
1595 uobj_put_obj_read(ind_tbl
);
1597 uobj_alloc_commit(&obj
->uevent
.uobject
);
1604 if (!IS_ERR(xrcd_uobj
))
1605 uobj_put_read(xrcd_uobj
);
1607 uobj_put_obj_read(pd
);
1609 uobj_put_obj_read(scq
);
1610 if (rcq
&& rcq
!= scq
)
1611 uobj_put_obj_read(rcq
);
1613 uobj_put_obj_read(srq
);
1615 uobj_put_obj_read(ind_tbl
);
1617 uobj_alloc_abort(&obj
->uevent
.uobject
);
1621 static int ib_uverbs_create_qp_cb(struct ib_uverbs_file
*file
,
1622 struct ib_uverbs_ex_create_qp_resp
*resp
,
1623 struct ib_udata
*ucore
)
1625 if (ib_copy_to_udata(ucore
, &resp
->base
, sizeof(resp
->base
)))
1631 ssize_t
ib_uverbs_create_qp(struct ib_uverbs_file
*file
,
1632 struct ib_device
*ib_dev
,
1633 const char __user
*buf
, int in_len
,
1636 struct ib_uverbs_create_qp cmd
;
1637 struct ib_uverbs_ex_create_qp cmd_ex
;
1638 struct ib_udata ucore
;
1639 struct ib_udata uhw
;
1640 ssize_t resp_size
= sizeof(struct ib_uverbs_create_qp_resp
);
1643 if (out_len
< resp_size
)
1646 if (copy_from_user(&cmd
, buf
, sizeof(cmd
)))
1649 ib_uverbs_init_udata(&ucore
, buf
, u64_to_user_ptr(cmd
.response
),
1650 sizeof(cmd
), resp_size
);
1651 ib_uverbs_init_udata(&uhw
, buf
+ sizeof(cmd
),
1652 u64_to_user_ptr(cmd
.response
) + resp_size
,
1653 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
1654 out_len
- resp_size
);
1656 memset(&cmd_ex
, 0, sizeof(cmd_ex
));
1657 cmd_ex
.user_handle
= cmd
.user_handle
;
1658 cmd_ex
.pd_handle
= cmd
.pd_handle
;
1659 cmd_ex
.send_cq_handle
= cmd
.send_cq_handle
;
1660 cmd_ex
.recv_cq_handle
= cmd
.recv_cq_handle
;
1661 cmd_ex
.srq_handle
= cmd
.srq_handle
;
1662 cmd_ex
.max_send_wr
= cmd
.max_send_wr
;
1663 cmd_ex
.max_recv_wr
= cmd
.max_recv_wr
;
1664 cmd_ex
.max_send_sge
= cmd
.max_send_sge
;
1665 cmd_ex
.max_recv_sge
= cmd
.max_recv_sge
;
1666 cmd_ex
.max_inline_data
= cmd
.max_inline_data
;
1667 cmd_ex
.sq_sig_all
= cmd
.sq_sig_all
;
1668 cmd_ex
.qp_type
= cmd
.qp_type
;
1669 cmd_ex
.is_srq
= cmd
.is_srq
;
1671 err
= create_qp(file
, &ucore
, &uhw
, &cmd_ex
,
1672 offsetof(typeof(cmd_ex
), is_srq
) +
1673 sizeof(cmd
.is_srq
), ib_uverbs_create_qp_cb
,
1682 static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file
*file
,
1683 struct ib_uverbs_ex_create_qp_resp
*resp
,
1684 struct ib_udata
*ucore
)
1686 if (ib_copy_to_udata(ucore
, resp
, resp
->response_length
))
1692 int ib_uverbs_ex_create_qp(struct ib_uverbs_file
*file
,
1693 struct ib_device
*ib_dev
,
1694 struct ib_udata
*ucore
,
1695 struct ib_udata
*uhw
)
1697 struct ib_uverbs_ex_create_qp_resp resp
;
1698 struct ib_uverbs_ex_create_qp cmd
= {0};
1701 if (ucore
->inlen
< (offsetof(typeof(cmd
), comp_mask
) +
1702 sizeof(cmd
.comp_mask
)))
1705 err
= ib_copy_from_udata(&cmd
, ucore
, min(sizeof(cmd
), ucore
->inlen
));
1709 if (cmd
.comp_mask
& ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK
)
1715 if (ucore
->outlen
< (offsetof(typeof(resp
), response_length
) +
1716 sizeof(resp
.response_length
)))
1719 err
= create_qp(file
, ucore
, uhw
, &cmd
,
1720 min(ucore
->inlen
, sizeof(cmd
)),
1721 ib_uverbs_ex_create_qp_cb
, NULL
);
1729 ssize_t
ib_uverbs_open_qp(struct ib_uverbs_file
*file
,
1730 struct ib_device
*ib_dev
,
1731 const char __user
*buf
, int in_len
, int out_len
)
1733 struct ib_uverbs_open_qp cmd
;
1734 struct ib_uverbs_create_qp_resp resp
;
1735 struct ib_udata udata
;
1736 struct ib_uqp_object
*obj
;
1737 struct ib_xrcd
*xrcd
;
1738 struct ib_uobject
*uninitialized_var(xrcd_uobj
);
1740 struct ib_qp_open_attr attr
;
1743 if (out_len
< sizeof resp
)
1746 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1749 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
),
1750 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
1751 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
1752 out_len
- sizeof(resp
));
1754 obj
= (struct ib_uqp_object
*)uobj_alloc(uobj_get_type(qp
),
1757 return PTR_ERR(obj
);
1759 xrcd_uobj
= uobj_get_read(uobj_get_type(xrcd
), cmd
.pd_handle
,
1761 if (IS_ERR(xrcd_uobj
)) {
1766 xrcd
= (struct ib_xrcd
*)xrcd_uobj
->object
;
1772 attr
.event_handler
= ib_uverbs_qp_event_handler
;
1773 attr
.qp_context
= file
;
1774 attr
.qp_num
= cmd
.qpn
;
1775 attr
.qp_type
= cmd
.qp_type
;
1777 obj
->uevent
.events_reported
= 0;
1778 INIT_LIST_HEAD(&obj
->uevent
.event_list
);
1779 INIT_LIST_HEAD(&obj
->mcast_list
);
1781 qp
= ib_open_qp(xrcd
, &attr
);
1787 obj
->uevent
.uobject
.object
= qp
;
1788 obj
->uevent
.uobject
.user_handle
= cmd
.user_handle
;
1790 memset(&resp
, 0, sizeof resp
);
1791 resp
.qpn
= qp
->qp_num
;
1792 resp
.qp_handle
= obj
->uevent
.uobject
.id
;
1794 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
)) {
1799 obj
->uxrcd
= container_of(xrcd_uobj
, struct ib_uxrcd_object
, uobject
);
1800 atomic_inc(&obj
->uxrcd
->refcnt
);
1801 qp
->uobject
= &obj
->uevent
.uobject
;
1802 uobj_put_read(xrcd_uobj
);
1805 uobj_alloc_commit(&obj
->uevent
.uobject
);
1812 uobj_put_read(xrcd_uobj
);
1814 uobj_alloc_abort(&obj
->uevent
.uobject
);
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route   *grh;

	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
					   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label        = grh->flow_label;
		uverb_attr->sgid_index        = grh->sgid_index;
		uverb_attr->hop_limit         = grh->hop_limit;
		uverb_attr->traffic_class     = grh->traffic_class;
	}
	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
}
1840 ssize_t
ib_uverbs_query_qp(struct ib_uverbs_file
*file
,
1841 struct ib_device
*ib_dev
,
1842 const char __user
*buf
, int in_len
,
1845 struct ib_uverbs_query_qp cmd
;
1846 struct ib_uverbs_query_qp_resp resp
;
1848 struct ib_qp_attr
*attr
;
1849 struct ib_qp_init_attr
*init_attr
;
1852 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1855 attr
= kmalloc(sizeof *attr
, GFP_KERNEL
);
1856 init_attr
= kmalloc(sizeof *init_attr
, GFP_KERNEL
);
1857 if (!attr
|| !init_attr
) {
1862 qp
= uobj_get_obj_read(qp
, cmd
.qp_handle
, file
->ucontext
);
1868 ret
= ib_query_qp(qp
, attr
, cmd
.attr_mask
, init_attr
);
1870 uobj_put_obj_read(qp
);
1875 memset(&resp
, 0, sizeof resp
);
1877 resp
.qp_state
= attr
->qp_state
;
1878 resp
.cur_qp_state
= attr
->cur_qp_state
;
1879 resp
.path_mtu
= attr
->path_mtu
;
1880 resp
.path_mig_state
= attr
->path_mig_state
;
1881 resp
.qkey
= attr
->qkey
;
1882 resp
.rq_psn
= attr
->rq_psn
;
1883 resp
.sq_psn
= attr
->sq_psn
;
1884 resp
.dest_qp_num
= attr
->dest_qp_num
;
1885 resp
.qp_access_flags
= attr
->qp_access_flags
;
1886 resp
.pkey_index
= attr
->pkey_index
;
1887 resp
.alt_pkey_index
= attr
->alt_pkey_index
;
1888 resp
.sq_draining
= attr
->sq_draining
;
1889 resp
.max_rd_atomic
= attr
->max_rd_atomic
;
1890 resp
.max_dest_rd_atomic
= attr
->max_dest_rd_atomic
;
1891 resp
.min_rnr_timer
= attr
->min_rnr_timer
;
1892 resp
.port_num
= attr
->port_num
;
1893 resp
.timeout
= attr
->timeout
;
1894 resp
.retry_cnt
= attr
->retry_cnt
;
1895 resp
.rnr_retry
= attr
->rnr_retry
;
1896 resp
.alt_port_num
= attr
->alt_port_num
;
1897 resp
.alt_timeout
= attr
->alt_timeout
;
1899 copy_ah_attr_to_uverbs(&resp
.dest
, &attr
->ah_attr
);
1900 copy_ah_attr_to_uverbs(&resp
.alt_dest
, &attr
->alt_ah_attr
);
1902 resp
.max_send_wr
= init_attr
->cap
.max_send_wr
;
1903 resp
.max_recv_wr
= init_attr
->cap
.max_recv_wr
;
1904 resp
.max_send_sge
= init_attr
->cap
.max_send_sge
;
1905 resp
.max_recv_sge
= init_attr
->cap
.max_recv_sge
;
1906 resp
.max_inline_data
= init_attr
->cap
.max_inline_data
;
1907 resp
.sq_sig_all
= init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
;
1909 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
1916 return ret
? ret
: in_len
;
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}
1956 static int modify_qp(struct ib_uverbs_file
*file
,
1957 struct ib_uverbs_ex_modify_qp
*cmd
, struct ib_udata
*udata
)
1959 struct ib_qp_attr
*attr
;
1963 attr
= kmalloc(sizeof *attr
, GFP_KERNEL
);
1967 qp
= uobj_get_obj_read(qp
, cmd
->base
.qp_handle
, file
->ucontext
);
1973 if ((cmd
->base
.attr_mask
& IB_QP_PORT
) &&
1974 !rdma_is_port_valid(qp
->device
, cmd
->base
.port_num
)) {
1979 if ((cmd
->base
.attr_mask
& IB_QP_AV
) &&
1980 !rdma_is_port_valid(qp
->device
, cmd
->base
.dest
.port_num
)) {
1985 if ((cmd
->base
.attr_mask
& IB_QP_ALT_PATH
) &&
1986 (!rdma_is_port_valid(qp
->device
, cmd
->base
.alt_port_num
) ||
1987 !rdma_is_port_valid(qp
->device
, cmd
->base
.alt_dest
.port_num
))) {
1992 attr
->qp_state
= cmd
->base
.qp_state
;
1993 attr
->cur_qp_state
= cmd
->base
.cur_qp_state
;
1994 attr
->path_mtu
= cmd
->base
.path_mtu
;
1995 attr
->path_mig_state
= cmd
->base
.path_mig_state
;
1996 attr
->qkey
= cmd
->base
.qkey
;
1997 attr
->rq_psn
= cmd
->base
.rq_psn
;
1998 attr
->sq_psn
= cmd
->base
.sq_psn
;
1999 attr
->dest_qp_num
= cmd
->base
.dest_qp_num
;
2000 attr
->qp_access_flags
= cmd
->base
.qp_access_flags
;
2001 attr
->pkey_index
= cmd
->base
.pkey_index
;
2002 attr
->alt_pkey_index
= cmd
->base
.alt_pkey_index
;
2003 attr
->en_sqd_async_notify
= cmd
->base
.en_sqd_async_notify
;
2004 attr
->max_rd_atomic
= cmd
->base
.max_rd_atomic
;
2005 attr
->max_dest_rd_atomic
= cmd
->base
.max_dest_rd_atomic
;
2006 attr
->min_rnr_timer
= cmd
->base
.min_rnr_timer
;
2007 attr
->port_num
= cmd
->base
.port_num
;
2008 attr
->timeout
= cmd
->base
.timeout
;
2009 attr
->retry_cnt
= cmd
->base
.retry_cnt
;
2010 attr
->rnr_retry
= cmd
->base
.rnr_retry
;
2011 attr
->alt_port_num
= cmd
->base
.alt_port_num
;
2012 attr
->alt_timeout
= cmd
->base
.alt_timeout
;
2013 attr
->rate_limit
= cmd
->rate_limit
;
2015 if (cmd
->base
.attr_mask
& IB_QP_AV
)
2016 copy_ah_attr_from_uverbs(qp
->device
, &attr
->ah_attr
,
2019 if (cmd
->base
.attr_mask
& IB_QP_ALT_PATH
)
2020 copy_ah_attr_from_uverbs(qp
->device
, &attr
->alt_ah_attr
,
2021 &cmd
->base
.alt_dest
);
2023 ret
= ib_modify_qp_with_udata(qp
, attr
,
2024 modify_qp_mask(qp
->qp_type
,
2025 cmd
->base
.attr_mask
),
2029 uobj_put_obj_read(qp
);
2036 ssize_t
ib_uverbs_modify_qp(struct ib_uverbs_file
*file
,
2037 struct ib_device
*ib_dev
,
2038 const char __user
*buf
, int in_len
,
2041 struct ib_uverbs_ex_modify_qp cmd
= {};
2042 struct ib_udata udata
;
2045 if (copy_from_user(&cmd
.base
, buf
, sizeof(cmd
.base
)))
2048 if (cmd
.base
.attr_mask
&
2049 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK
<< 1) - 1))
2052 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
.base
), NULL
,
2053 in_len
- sizeof(cmd
.base
) - sizeof(struct ib_uverbs_cmd_hdr
),
2056 ret
= modify_qp(file
, &cmd
, &udata
);
2063 int ib_uverbs_ex_modify_qp(struct ib_uverbs_file
*file
,
2064 struct ib_device
*ib_dev
,
2065 struct ib_udata
*ucore
,
2066 struct ib_udata
*uhw
)
2068 struct ib_uverbs_ex_modify_qp cmd
= {};
2072 * Last bit is reserved for extending the attr_mask by
2073 * using another field.
2075 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK
== (1 << 31));
2077 if (ucore
->inlen
< sizeof(cmd
.base
))
2080 ret
= ib_copy_from_udata(&cmd
, ucore
, min(sizeof(cmd
), ucore
->inlen
));
2084 if (cmd
.base
.attr_mask
&
2085 ~((IB_USER_LAST_QP_ATTR_MASK
<< 1) - 1))
2088 if (ucore
->inlen
> sizeof(cmd
)) {
2089 if (!ib_is_udata_cleared(ucore
, sizeof(cmd
),
2090 ucore
->inlen
- sizeof(cmd
)))
2094 ret
= modify_qp(file
, &cmd
, uhw
);
2099 ssize_t
ib_uverbs_destroy_qp(struct ib_uverbs_file
*file
,
2100 struct ib_device
*ib_dev
,
2101 const char __user
*buf
, int in_len
,
2104 struct ib_uverbs_destroy_qp cmd
;
2105 struct ib_uverbs_destroy_qp_resp resp
;
2106 struct ib_uobject
*uobj
;
2107 struct ib_uqp_object
*obj
;
2110 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2113 memset(&resp
, 0, sizeof resp
);
2115 uobj
= uobj_get_write(uobj_get_type(qp
), cmd
.qp_handle
,
2118 return PTR_ERR(uobj
);
2120 obj
= container_of(uobj
, struct ib_uqp_object
, uevent
.uobject
);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
2125 uverbs_uobject_get(uobj
);
2127 ret
= uobj_remove_commit(uobj
);
2129 uverbs_uobject_put(uobj
);
2133 resp
.events_reported
= obj
->uevent
.events_reported
;
2134 uverbs_uobject_put(uobj
);
2136 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
2152 ssize_t
ib_uverbs_post_send(struct ib_uverbs_file
*file
,
2153 struct ib_device
*ib_dev
,
2154 const char __user
*buf
, int in_len
,
2157 struct ib_uverbs_post_send cmd
;
2158 struct ib_uverbs_post_send_resp resp
;
2159 struct ib_uverbs_send_wr
*user_wr
;
2160 struct ib_send_wr
*wr
= NULL
, *last
, *next
, *bad_wr
;
2164 ssize_t ret
= -EINVAL
;
2167 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2170 if (in_len
< sizeof cmd
+ cmd
.wqe_size
* cmd
.wr_count
+
2171 cmd
.sge_count
* sizeof (struct ib_uverbs_sge
))
2174 if (cmd
.wqe_size
< sizeof (struct ib_uverbs_send_wr
))
2177 user_wr
= kmalloc(cmd
.wqe_size
, GFP_KERNEL
);
2181 qp
= uobj_get_obj_read(qp
, cmd
.qp_handle
, file
->ucontext
);
2185 is_ud
= qp
->qp_type
== IB_QPT_UD
;
2188 for (i
= 0; i
< cmd
.wr_count
; ++i
) {
2189 if (copy_from_user(user_wr
,
2190 buf
+ sizeof cmd
+ i
* cmd
.wqe_size
,
2196 if (user_wr
->num_sge
+ sg_ind
> cmd
.sge_count
) {
2202 struct ib_ud_wr
*ud
;
2204 if (user_wr
->opcode
!= IB_WR_SEND
&&
2205 user_wr
->opcode
!= IB_WR_SEND_WITH_IMM
) {
2210 next_size
= sizeof(*ud
);
2211 ud
= alloc_wr(next_size
, user_wr
->num_sge
);
2217 ud
->ah
= uobj_get_obj_read(ah
, user_wr
->wr
.ud
.ah
,
2224 ud
->remote_qpn
= user_wr
->wr
.ud
.remote_qpn
;
2225 ud
->remote_qkey
= user_wr
->wr
.ud
.remote_qkey
;
2228 } else if (user_wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
||
2229 user_wr
->opcode
== IB_WR_RDMA_WRITE
||
2230 user_wr
->opcode
== IB_WR_RDMA_READ
) {
2231 struct ib_rdma_wr
*rdma
;
2233 next_size
= sizeof(*rdma
);
2234 rdma
= alloc_wr(next_size
, user_wr
->num_sge
);
2240 rdma
->remote_addr
= user_wr
->wr
.rdma
.remote_addr
;
2241 rdma
->rkey
= user_wr
->wr
.rdma
.rkey
;
2244 } else if (user_wr
->opcode
== IB_WR_ATOMIC_CMP_AND_SWP
||
2245 user_wr
->opcode
== IB_WR_ATOMIC_FETCH_AND_ADD
) {
2246 struct ib_atomic_wr
*atomic
;
2248 next_size
= sizeof(*atomic
);
2249 atomic
= alloc_wr(next_size
, user_wr
->num_sge
);
2255 atomic
->remote_addr
= user_wr
->wr
.atomic
.remote_addr
;
2256 atomic
->compare_add
= user_wr
->wr
.atomic
.compare_add
;
2257 atomic
->swap
= user_wr
->wr
.atomic
.swap
;
2258 atomic
->rkey
= user_wr
->wr
.atomic
.rkey
;
2261 } else if (user_wr
->opcode
== IB_WR_SEND
||
2262 user_wr
->opcode
== IB_WR_SEND_WITH_IMM
||
2263 user_wr
->opcode
== IB_WR_SEND_WITH_INV
) {
2264 next_size
= sizeof(*next
);
2265 next
= alloc_wr(next_size
, user_wr
->num_sge
);
2275 if (user_wr
->opcode
== IB_WR_SEND_WITH_IMM
||
2276 user_wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
) {
2278 (__be32 __force
) user_wr
->ex
.imm_data
;
2279 } else if (user_wr
->opcode
== IB_WR_SEND_WITH_INV
) {
2280 next
->ex
.invalidate_rkey
= user_wr
->ex
.invalidate_rkey
;
2290 next
->wr_id
= user_wr
->wr_id
;
2291 next
->num_sge
= user_wr
->num_sge
;
2292 next
->opcode
= user_wr
->opcode
;
2293 next
->send_flags
= user_wr
->send_flags
;
2295 if (next
->num_sge
) {
2296 next
->sg_list
= (void *) next
+
2297 ALIGN(next_size
, sizeof(struct ib_sge
));
2298 if (copy_from_user(next
->sg_list
,
2300 cmd
.wr_count
* cmd
.wqe_size
+
2301 sg_ind
* sizeof (struct ib_sge
),
2302 next
->num_sge
* sizeof (struct ib_sge
))) {
2306 sg_ind
+= next
->num_sge
;
2308 next
->sg_list
= NULL
;
2312 ret
= qp
->device
->post_send(qp
->real_qp
, wr
, &bad_wr
);
2314 for (next
= wr
; next
; next
= next
->next
) {
2320 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2324 uobj_put_obj_read(qp
);
2327 if (is_ud
&& ud_wr(wr
)->ah
)
2328 uobj_put_obj_read(ud_wr(wr
)->ah
);
2337 return ret
? ret
: in_len
;
2340 static struct ib_recv_wr
*ib_uverbs_unmarshall_recv(const char __user
*buf
,
2346 struct ib_uverbs_recv_wr
*user_wr
;
2347 struct ib_recv_wr
*wr
= NULL
, *last
, *next
;
2352 if (in_len
< wqe_size
* wr_count
+
2353 sge_count
* sizeof (struct ib_uverbs_sge
))
2354 return ERR_PTR(-EINVAL
);
2356 if (wqe_size
< sizeof (struct ib_uverbs_recv_wr
))
2357 return ERR_PTR(-EINVAL
);
2359 user_wr
= kmalloc(wqe_size
, GFP_KERNEL
);
2361 return ERR_PTR(-ENOMEM
);
2365 for (i
= 0; i
< wr_count
; ++i
) {
2366 if (copy_from_user(user_wr
, buf
+ i
* wqe_size
,
2372 if (user_wr
->num_sge
+ sg_ind
> sge_count
) {
2377 if (user_wr
->num_sge
>=
2378 (U32_MAX
- ALIGN(sizeof *next
, sizeof (struct ib_sge
))) /
2379 sizeof (struct ib_sge
)) {
2384 next
= kmalloc(ALIGN(sizeof *next
, sizeof (struct ib_sge
)) +
2385 user_wr
->num_sge
* sizeof (struct ib_sge
),
2399 next
->wr_id
= user_wr
->wr_id
;
2400 next
->num_sge
= user_wr
->num_sge
;
2402 if (next
->num_sge
) {
2403 next
->sg_list
= (void *) next
+
2404 ALIGN(sizeof *next
, sizeof (struct ib_sge
));
2405 if (copy_from_user(next
->sg_list
,
2406 buf
+ wr_count
* wqe_size
+
2407 sg_ind
* sizeof (struct ib_sge
),
2408 next
->num_sge
* sizeof (struct ib_sge
))) {
2412 sg_ind
+= next
->num_sge
;
2414 next
->sg_list
= NULL
;
2429 return ERR_PTR(ret
);
2432 ssize_t
ib_uverbs_post_recv(struct ib_uverbs_file
*file
,
2433 struct ib_device
*ib_dev
,
2434 const char __user
*buf
, int in_len
,
2437 struct ib_uverbs_post_recv cmd
;
2438 struct ib_uverbs_post_recv_resp resp
;
2439 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
2441 ssize_t ret
= -EINVAL
;
2443 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2446 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
2447 in_len
- sizeof cmd
, cmd
.wr_count
,
2448 cmd
.sge_count
, cmd
.wqe_size
);
2452 qp
= uobj_get_obj_read(qp
, cmd
.qp_handle
, file
->ucontext
);
2457 ret
= qp
->device
->post_recv(qp
->real_qp
, wr
, &bad_wr
);
2459 uobj_put_obj_read(qp
);
2461 for (next
= wr
; next
; next
= next
->next
) {
2468 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2478 return ret
? ret
: in_len
;
2481 ssize_t
ib_uverbs_post_srq_recv(struct ib_uverbs_file
*file
,
2482 struct ib_device
*ib_dev
,
2483 const char __user
*buf
, int in_len
,
2486 struct ib_uverbs_post_srq_recv cmd
;
2487 struct ib_uverbs_post_srq_recv_resp resp
;
2488 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
2490 ssize_t ret
= -EINVAL
;
2492 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2495 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
2496 in_len
- sizeof cmd
, cmd
.wr_count
,
2497 cmd
.sge_count
, cmd
.wqe_size
);
2501 srq
= uobj_get_obj_read(srq
, cmd
.srq_handle
, file
->ucontext
);
2506 ret
= srq
->device
->post_srq_recv(srq
, wr
, &bad_wr
);
2508 uobj_put_obj_read(srq
);
2511 for (next
= wr
; next
; next
= next
->next
) {
2517 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2527 return ret
? ret
: in_len
;
2530 ssize_t
ib_uverbs_create_ah(struct ib_uverbs_file
*file
,
2531 struct ib_device
*ib_dev
,
2532 const char __user
*buf
, int in_len
,
2535 struct ib_uverbs_create_ah cmd
;
2536 struct ib_uverbs_create_ah_resp resp
;
2537 struct ib_uobject
*uobj
;
2540 struct rdma_ah_attr attr
;
2542 struct ib_udata udata
;
2544 if (out_len
< sizeof resp
)
2547 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2550 if (!rdma_is_port_valid(ib_dev
, cmd
.attr
.port_num
))
2553 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
),
2554 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
2555 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
2556 out_len
- sizeof(resp
));
2558 uobj
= uobj_alloc(uobj_get_type(ah
), file
->ucontext
);
2560 return PTR_ERR(uobj
);
2562 pd
= uobj_get_obj_read(pd
, cmd
.pd_handle
, file
->ucontext
);
2568 attr
.type
= rdma_ah_find_type(ib_dev
, cmd
.attr
.port_num
);
2569 rdma_ah_set_make_grd(&attr
, false);
2570 rdma_ah_set_dlid(&attr
, cmd
.attr
.dlid
);
2571 rdma_ah_set_sl(&attr
, cmd
.attr
.sl
);
2572 rdma_ah_set_path_bits(&attr
, cmd
.attr
.src_path_bits
);
2573 rdma_ah_set_static_rate(&attr
, cmd
.attr
.static_rate
);
2574 rdma_ah_set_port_num(&attr
, cmd
.attr
.port_num
);
2576 if (cmd
.attr
.is_global
) {
2577 rdma_ah_set_grh(&attr
, NULL
, cmd
.attr
.grh
.flow_label
,
2578 cmd
.attr
.grh
.sgid_index
,
2579 cmd
.attr
.grh
.hop_limit
,
2580 cmd
.attr
.grh
.traffic_class
);
2581 rdma_ah_set_dgid_raw(&attr
, cmd
.attr
.grh
.dgid
);
2583 rdma_ah_set_ah_flags(&attr
, 0);
2586 ah
= rdma_create_user_ah(pd
, &attr
, &udata
);
2593 uobj
->user_handle
= cmd
.user_handle
;
2596 resp
.ah_handle
= uobj
->id
;
2598 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
)) {
2603 uobj_put_obj_read(pd
);
2604 uobj_alloc_commit(uobj
);
2609 rdma_destroy_ah(ah
);
2612 uobj_put_obj_read(pd
);
2615 uobj_alloc_abort(uobj
);
2619 ssize_t
ib_uverbs_destroy_ah(struct ib_uverbs_file
*file
,
2620 struct ib_device
*ib_dev
,
2621 const char __user
*buf
, int in_len
, int out_len
)
2623 struct ib_uverbs_destroy_ah cmd
;
2624 struct ib_uobject
*uobj
;
2627 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2630 uobj
= uobj_get_write(uobj_get_type(ah
), cmd
.ah_handle
,
2633 return PTR_ERR(uobj
);
2635 ret
= uobj_remove_commit(uobj
);
2636 return ret
?: in_len
;
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast	 cmd;
	struct ib_qp			*qp;
	struct ib_uqp_object		*obj;
	struct ib_uverbs_mcast_entry	*mcast;
	int				 ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast	 cmd;
	struct ib_uqp_object		*obj;
	struct ib_qp			*qp;
	struct ib_uverbs_mcast_entry	*mcast;
	int				 ret;
	bool				 found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
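/*
 * The multicast attach/detach handlers above keep a per-QP list of attached
 * groups (obj->mcast_list, protected by obj->mcast_lock). Attach is treated
 * as idempotent: if the <mlid, gid> pair is already on the list the command
 * succeeds without calling ib_attach_mcast() again, and detach only calls
 * ib_detach_mcast() for entries that were actually found on the list.
 */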
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */
	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}
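/*
 * Layout assumed by the two helpers above: a user flow spec is a
 * struct ib_uverbs_flow_spec_hdr followed by the filter value and then the
 * filter mask, each taking half of the remaining bytes, which is why
 * kern_spec_filter_sz() divides by two:
 *
 *	| hdr | val (kern_filter_sz bytes) | mask (kern_filter_sz bytes) |
 *
 * spec_filter_size() then clamps the number of bytes to copy to what the
 * kernel actually understands (ib_real_filter_sz) and rejects specs whose
 * extra, unknown trailing bytes are non-zero.
 */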
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
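/*
 * kern_spec_to_ib_spec() is the single entry point used by the flow-steering
 * path below: spec types at or above IB_FLOW_SPEC_ACTION_TAG are actions and
 * are converted by kern_spec_to_ib_spec_action(); everything else is a packet
 * filter and goes through kern_spec_to_ib_spec_filter().
 */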
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq		cmd = {};
	struct ib_uverbs_ex_create_wq_resp	resp = {};
	struct ib_uwq_object		       *obj;
	int					err = 0;
	struct ib_cq			       *cq;
	struct ib_pd			       *pd;
	struct ib_wq			       *wq;
	struct ib_wq_init_attr			wq_init_attr = {};
	size_t					required_cmd_sz;
	size_t					required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	if (!pd->device->create_wq) {
		err = -EOPNOTSUPP;
		goto err_put_cq;
	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);
	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
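/*
 * The extended (ex_) commands above and below all follow the same
 * forward/backward compatibility pattern: the minimum command size is computed
 * with offsetof() on the last field this kernel requires, a larger command is
 * accepted only if the bytes beyond sizeof(cmd) are zero (ib_is_udata_cleared),
 * and only min(sizeof(cmd), ucore->inlen) bytes are copied in.
 */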
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq		cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		       *uobj;
	struct ib_uwq_object		       *obj;
	size_t					required_cmd_sz;
	size_t					required_resp_len;
	int					ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq	cmd = {};
	struct ib_wq		       *wq;
	struct ib_wq_attr		wq_attr = {};
	size_t				required_cmd_sz;
	int				ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	if (!wq->device->modify_wq) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
	uobj_put_obj_read(wq);
	return ret;
}
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table	cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp	resp = {};
	struct ib_uobject			       *uobj;
	int						err = 0;
	struct ib_rwq_ind_table_init_attr		init_attr = {};
	struct ib_rwq_ind_table			       *rwq_ind_tbl;
	struct ib_wq				      **wqs = NULL;
	u32					       *wqs_handles = NULL;
	struct ib_wq				       *wq = NULL;
	int						i, j, num_read_wqs;
	u32						num_wq_handles;
	u32						expected_in_size;
	size_t						required_cmd_sz_header;
	size_t						required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	if (!ib_dev->create_rwq_ind_table) {
		err = -EOPNOTSUPP;
		goto err_uobj;
	}
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}
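/*
 * An RWQ indirection table always has a power-of-two number of entries, so
 * the handler above derives it as 1 << log_ind_tbl_size. The single-entry
 * case still expects an extra __u32 of input because the user-space handle
 * array is padded to u64 alignment.
 */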
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table	cmd = {};
	struct ib_uobject			       *uobj;
	int						ret;
	size_t						required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_uverbs_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int				  err = 0;
	void				 *kern_spec;
	void				 *ib_spec;
	int				  i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
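/*
 * The spec-parsing loop above walks the variable-length region that follows
 * the user's ib_uverbs_flow_attr: each iteration converts one spec with
 * kern_spec_to_ib_spec(), then advances both cursors by the size recorded in
 * the spec itself. Any leftover bytes, or fewer specs than num_of_specs
 * claimed, cause the command to be rejected.
 */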
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_uobject	       *uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
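/*
 * __uverbs_create_xsrq() is the common worker for the two SRQ creation entry
 * points below: ib_uverbs_create_srq() translates the legacy command into an
 * ib_uverbs_create_xsrq with srq_type = IB_SRQT_BASIC, while
 * ib_uverbs_create_xsrq() passes the extended command (XRC or tag-matching
 * SRQs, which additionally reference an XRCD and/or a CQ) straight through.
 */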
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                  *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object		 *obj;
	int				  ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
	resp.tm_caps.flags		= attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}
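/*
 * ib_uverbs_ex_query_device() grows its response incrementally: each optional
 * capability block is copied out, and accounted for in resp.response_length,
 * only if the user buffer is large enough to hold everything reported so far
 * plus that block. Older user space therefore receives exactly the prefix of
 * the response it knows about.
 */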
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
				sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}