/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "core_priv.h"
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	uverbs_uobject_get(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
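/*
 * GET_CONTEXT: allocate the per-process ib_ucontext, charge it against the
 * RDMA cgroup, initialise the on-demand-paging state, and return an async
 * event fd plus the number of completion vectors to userspace.
 */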
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct ib_rdmacg_object cg_obj;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	mutex_lock(&file->mutex);

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	ib_uverbs_free_async_event_file(file);

	put_unused_fd(resp.async_fd);

	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

	mutex_unlock(&file->mutex);
}
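/*
 * Copy the device attributes into the legacy query_device response layout
 * expected by userspace.
 */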
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
}
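/*
 * QUERY_PORT: translate ib_port_attr into the uverbs response, converting
 * LIDs appropriately for OPA-capable ports.
 */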
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}

	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);

	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	uobj_alloc_commit(uobj);

	uobj_alloc_abort(uobj);
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
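/*
 * XRCDs opened through a file descriptor are shared per inode: the rb-tree
 * below maps an inode to its ib_xrcd so that concurrent opens of the same
 * file reuse one XRC domain.
 */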
struct xrcd_table_entry {
	struct ib_xrcd *xrcd;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);

	entry->inode = inode;

		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
		} else if (inode > scan->inode) {

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
		else if (inode > entry->inode)
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);

	rb_erase(&entry->node, &dev->xrcd_tree);
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

		/* search for file descriptor */
			goto err_tree_mutex_unlock;

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			goto err_tree_mutex_unlock;

		if (xrcd && cmd.oflags & O_EXCL) {
			goto err_tree_mutex_unlock;

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
		goto err_tree_mutex_unlock;

		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);

		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
				goto err_dealloc_xrcd;

			atomic_inc(&xrcd->usecnt);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);

		xrcd_table_delete(file->device, inode);
	atomic_dec(&xrcd->usecnt);

	ib_dealloc_xrcd(xrcd);

	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:

	mutex_unlock(&file->device->xrcd_tree_mutex);
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
		mutex_unlock(&file->device->xrcd_tree_mutex);
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);

		xrcd_table_delete(dev, inode);
}
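/*
 * REG_MR: validate the access flags and page alignment requested by
 * userspace, then register the memory region against the chosen PD.
 */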
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))

	ret = ib_check_mr_access(cmd.access_flags);

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);

	mr->device = pd->device;

	atomic_inc(&pd->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	uobj_put_obj_read(pd);

	uobj_alloc_abort(uobj);
}

ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_pd *old_pd;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))

	if (copy_from_user(&cmd, buf, sizeof(cmd)))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	    (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);

	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);

	if (cmd.flags & IB_MR_REREG_PD) {
		atomic_inc(&pd->usecnt);
		atomic_dec(&old_pd->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))

	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

	uobj_put_write(uobj);
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_udata udata;

	if (out_len < sizeof(resp))

	if (copy_from_user(&cmd, buf, sizeof(cmd)))

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);

	mw->device = pd->device;

	atomic_inc(&pd->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	uverbs_dealloc_mw(mw);

	uobj_put_obj_read(pd);

	uobj_alloc_abort(uobj);
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
		return PTR_ERR(uobj);

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);

	uobj_alloc_commit(uobj);
}
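/*
 * Common CQ creation path shared by the legacy and the extended create_cq
 * commands; the cb argument writes the response back in whichever format
 * the caller expects.
 */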
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);

	uobj_alloc_commit(&obj->uobject);
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ib_uverbs_release_ucq(file, ev_file, obj);

	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))

	if (copy_from_user(&cmd, buf, sizeof(cmd)))

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

		return PTR_ERR(obj);
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;

	if (ucore->inlen < sizeof(cmd))

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))

	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}
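/*
 * Convert an ib_wc into the fixed-layout ib_uverbs_wc and copy it to
 * userspace, converting the SLID for OPA-capable ports.
 */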
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);

	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;

	if (copy_to_user(dest, &tmp, sizeof tmp))
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);

		data_ptr += sizeof(struct ib_uverbs_wc);

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {

	uobj_put_obj_read(cq);
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_ucq_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
		uverbs_uobject_put(uobj);

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
}
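/*
 * Common QP creation path shared by the legacy and extended commands: look
 * up the PD, CQs, SRQ, XRCD or RWQ indirection table, validate the
 * requested capabilities and create flags, create the QP and take
 * references on every object it uses.
 */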
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_rwq_ind_table *ind_tbl = NULL;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
		return PTR_ERR(obj);

	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);

		attr.rwq_ind_tbl = ind_tbl;

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {

	if (ind_tbl && !cmd->max_send_wr)

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;

		device = xrcd->device;

		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;

				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {

			if (cmd->recv_cq_handle != cmd->send_cq_handle) {
				rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
							file->ucontext);

			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);

		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {

		device = pd->device;

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {

		attr.source_qpn = cmd->source_qpn;

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw);

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);

		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;

		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);

			atomic_inc(&attr.send_cq->usecnt);

			atomic_inc(&attr.recv_cq->usecnt);

			atomic_inc(&attr.srq->usecnt);

			atomic_inc(&ind_tbl->usecnt);

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);

		uobj_put_obj_read(pd);

		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);

		uobj_put_obj_read(srq);

		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);

		uobj_put_obj_read(pd);

		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);

		uobj_put_obj_read(srq);

		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);

	if (out_len < resp_size)

	if (copy_from_user(&cmd, buf, sizeof(cmd)))

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + resp_size,
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp_open_attr attr;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	uobj_put_read(xrcd_uobj);

	uobj_alloc_abort(&obj->uevent.uobject);
}
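/* Translate a kernel rdma_ah_attr into the uverbs wire representation. */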
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;

	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}
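/*
 * Shared MODIFY_QP worker: copy the attribute mask and values out of the
 * command, convert any address-vector fields, and apply them with
 * ib_modify_qp_with_udata().
 */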
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

	uobj_put_obj_read(qp);
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
			     in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len);

	ret = modify_qp(file, &cmd, &udata);
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))

	ret = modify_qp(file, &cmd, uhw);
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
		uverbs_uobject_put(uobj);

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
}
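/*
 * Allocate a work request together with its trailing SG list, guarding
 * against integer overflow in the size calculation.
 */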
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
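/*
 * POST_SEND: re-marshal the userspace send work requests (UD, RDMA, atomic
 * or plain sends) into kernel ib_send_wr chains and post them to the QP.
 */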
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);

	is_ud = qp->qp_type == IB_QPT_UD;

	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {

			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);

			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
				(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;

		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {

			sg_ind += next->num_sge;

			next->sg_list = NULL;

	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);

		for (next = wr; next; next = next->next) {

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))

	uobj_put_obj_read(qp);

		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);

	return ret ? ret : in_len;
}
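/*
 * Re-marshal a userspace array of receive work requests into a chain of
 * kernel ib_recv_wr structures; shared by POST_RECV and POST_SRQ_RECV.
 */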
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len, u32 wr_count,
						    u32 sge_count, u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {

		if (user_wr->num_sge + sg_ind > sge_count) {

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);

		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {

			sg_ind += next->num_sge;

			next->sg_list = NULL;

	return ERR_PTR(ret);
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);

	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);

		for (next = wr; next; next = next->next) {

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);

	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

		for (next = wr; next; next = next->next) {

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct rdma_ah_attr attr;
	struct ib_udata udata;

	if (out_len < sizeof resp)

	if (copy_from_user(&cmd, buf, sizeof cmd))

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &udata);

	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	rdma_destroy_ah(ah);

	uobj_put_obj_read(pd);

	uobj_alloc_abort(uobj);
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;
	bool                          found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}
static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}
static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}
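
/*
 * Flow-spec filters arrive from user space as a value/mask pair of equal
 * size. kern_spec_filter_sz() recovers that per-half size from the header,
 * and spec_filter_size() accepts a user filter larger than the kernel's as
 * long as the trailing, unknown bytes are all zero.
 */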
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
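
/*
 * The extended (ex_) commands below are size-extensible: each handler
 * computes the minimum command and response sizes it understands, rejects
 * shorter input, and insists that any input bytes beyond the known
 * structure are zeroed (ib_is_udata_cleared) before copying
 * min(sizeof(cmd), ucore->inlen) bytes.
 */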
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq	  cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);
	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq	cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int				ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}
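
/*
 * An RWQ indirection table is built from 1 << log_ind_tbl_size work queue
 * handles supplied by user space; each referenced WQ is looked up and its
 * use count bumped for the lifetime of the table.
 */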
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table	cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp	resp = {};
	struct ib_uobject		  *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table	cmd = {};
	struct ib_uobject		*uobj;
	int			ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}
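
/*
 * Flow creation copies the variable-sized list of user flow specs, converts
 * each one with kern_spec_to_ib_spec() into the kernel representation, and
 * only then hands the assembled ib_flow_attr to ib_create_flow().
 */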
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}
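
/*
 * Common SRQ creation path shared by the basic and extended commands: the
 * XRC and tag-matching variants pull in their extra objects (XRCD, CQ)
 * before the driver's create_srq() is called.
 */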
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object		 *obj;
	int				  ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}
	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}
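
/*
 * Extended device query builds its response incrementally: each optional
 * capability block is appended, and resp.response_length grows, only when
 * the caller's output buffer is large enough to hold it.
 */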
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size	= attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags	= attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops		= attr.tm_caps.max_ops;
	resp.tm_caps.max_sge		= attr.tm_caps.max_sge;
	resp.tm_caps.flags		= attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count  =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
				sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq