/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
						fd, context);
	struct ib_uobject_file *uobj_file;

	uverbs_uobject_get(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct ib_rdmacg_object cg_obj;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;
#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	ib_uverbs_free_async_event_file(file);

	put_unused_fd(resp.async_fd);

	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

	mutex_unlock(&file->mutex);
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}

	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);

	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
	}

	uobj_alloc_commit(uobj);

	uobj_alloc_abort(uobj);
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);

	entry->inode = inode;

	scan = rb_entry(parent, struct xrcd_table_entry, node);

	if (inode < scan->inode) {
	} else if (inode > scan->inode) {
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	entry = rb_entry(p, struct xrcd_table_entry, node);

	if (inode < entry->inode)
	else if (inode > entry->inode)
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);

	rb_erase(&entry->node, &dev->xrcd_tree);
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	/* search for file descriptor */
		goto err_tree_mutex_unlock;

	inode = file_inode(f.file);
	xrcd = find_xrcd(file->device, inode);
	if (!xrcd && !(cmd.oflags & O_CREAT)) {
		/* no file descriptor. Need CREATE flag */
		goto err_tree_mutex_unlock;
	}

	if (xrcd && cmd.oflags & O_EXCL) {
		goto err_tree_mutex_unlock;
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
						   file->ucontext);
		goto err_tree_mutex_unlock;

	xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);

	xrcd->device = ib_dev;
	atomic_set(&xrcd->usecnt, 0);
	mutex_init(&xrcd->tgt_qp_mutex);
	INIT_LIST_HEAD(&xrcd->tgt_qp_list);

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	/* create new inode/xrcd table entry */
	ret = xrcd_table_insert(file->device, inode, xrcd);
		goto err_dealloc_xrcd;

	atomic_inc(&xrcd->usecnt);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
	}

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	xrcd_table_delete(file->device, inode);
	atomic_dec(&xrcd->usecnt);

	ib_dealloc_xrcd(xrcd);

	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	mutex_unlock(&file->device->xrcd_tree_mutex);
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);

	xrcd_table_delete(dev, inode);
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);

	uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);

	mr->device = pd->device;

	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	uobj_put_obj_read(pd);

	uobj_alloc_abort(uobj);
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_pd *old_pd;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	    (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	}

	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);

	if (cmd.flags & IB_MR_REREG_PD) {
		atomic_inc(&pd->usecnt);
		atomic_dec(&old_pd->usecnt);
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;

	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

	uobj_put_write(uobj);
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_udata udata;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);

	mw->device = pd->device;

	atomic_inc(&pd->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	uverbs_dealloc_mw(mw);

	uobj_put_obj_read(pd);

	uobj_alloc_abort(uobj);
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
	}

	uobj_alloc_commit(uobj);
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
						 file->ucontext);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);

	uobj_alloc_commit(&obj->uobject);

	ib_uverbs_release_ucq(file, ev_file, obj);

	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);
}
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);

		data_ptr += sizeof(struct ib_uverbs_wc);
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
	}

	uobj_put_obj_read(cq);
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_ucq_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * needs the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);

	uverbs_uobject_put(uobj);

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_rwq_ind_table *ind_tbl = NULL;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
	}

	if (ind_tbl && !cmd->max_send_wr)

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;

		device = xrcd->device;
	}

	if (cmd->qp_type == IB_QPT_XRC_INI) {
		cmd->max_recv_wr = 0;
		cmd->max_recv_sge = 0;
	}

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
				file->ucontext);
	if (!srq || srq->srq_type == IB_SRQT_XRC) {
	}

	if (cmd->recv_cq_handle != cmd->send_cq_handle) {
		rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
					file->ucontext);
	}

	scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
				file->ucontext);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
	if (!pd || (!scq && has_sq)) {
	}

	device = pd->device;

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);

		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		atomic_inc(&attr.recv_cq->usecnt);
		atomic_inc(&attr.srq->usecnt);
		atomic_inc(&ind_tbl->usecnt);
	}

	/* It is done in _ib_create_qp for other QP types */
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
				  uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	uobj_put_read(xrcd_uobj);

	uobj_put_obj_read(pd);

	uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);

	uobj_put_obj_read(srq);

	uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);

	uobj_put_obj_read(pd);

	uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);

	uobj_put_obj_read(srq);

	uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + resp_size,
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);
}
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp_open_attr attr;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
			     u64_to_user_ptr(cmd.response) + sizeof(resp),
			     in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	uobj_put_read(xrcd_uobj);

	uobj_alloc_abort(&obj->uevent.uobject);
}
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    cmd->base.qp_state > IB_QPS_ERR) {
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

	uobj_put_obj_read(qp);
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
			     in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
			     out_len);

	ret = modify_qp(file, &cmd, &udata);
}
int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * needs the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);

	uverbs_uobject_put(uobj);

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
2167 ssize_t
ib_uverbs_post_send(struct ib_uverbs_file
*file
,
2168 struct ib_device
*ib_dev
,
2169 const char __user
*buf
, int in_len
,
2172 struct ib_uverbs_post_send cmd
;
2173 struct ib_uverbs_post_send_resp resp
;
2174 struct ib_uverbs_send_wr
*user_wr
;
2175 struct ib_send_wr
*wr
= NULL
, *last
, *next
, *bad_wr
;
2179 ssize_t ret
= -EINVAL
;
2182 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2185 if (in_len
< sizeof cmd
+ cmd
.wqe_size
* cmd
.wr_count
+
2186 cmd
.sge_count
* sizeof (struct ib_uverbs_sge
))
2189 if (cmd
.wqe_size
< sizeof (struct ib_uverbs_send_wr
))
2192 user_wr
= kmalloc(cmd
.wqe_size
, GFP_KERNEL
);
2196 qp
= uobj_get_obj_read(qp
, UVERBS_OBJECT_QP
, cmd
.qp_handle
, file
->ucontext
);
2200 is_ud
= qp
->qp_type
== IB_QPT_UD
;
2203 for (i
= 0; i
< cmd
.wr_count
; ++i
) {
2204 if (copy_from_user(user_wr
,
2205 buf
+ sizeof cmd
+ i
* cmd
.wqe_size
,
2211 if (user_wr
->num_sge
+ sg_ind
> cmd
.sge_count
) {
2217 struct ib_ud_wr
*ud
;
2219 if (user_wr
->opcode
!= IB_WR_SEND
&&
2220 user_wr
->opcode
!= IB_WR_SEND_WITH_IMM
) {
2225 next_size
= sizeof(*ud
);
2226 ud
= alloc_wr(next_size
, user_wr
->num_sge
);
2232 ud
->ah
= uobj_get_obj_read(ah
, UVERBS_OBJECT_AH
, user_wr
->wr
.ud
.ah
,
2239 ud
->remote_qpn
= user_wr
->wr
.ud
.remote_qpn
;
2240 ud
->remote_qkey
= user_wr
->wr
.ud
.remote_qkey
;
2243 } else if (user_wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
||
2244 user_wr
->opcode
== IB_WR_RDMA_WRITE
||
2245 user_wr
->opcode
== IB_WR_RDMA_READ
) {
2246 struct ib_rdma_wr
*rdma
;
2248 next_size
= sizeof(*rdma
);
2249 rdma
= alloc_wr(next_size
, user_wr
->num_sge
);
2255 rdma
->remote_addr
= user_wr
->wr
.rdma
.remote_addr
;
2256 rdma
->rkey
= user_wr
->wr
.rdma
.rkey
;
2259 } else if (user_wr
->opcode
== IB_WR_ATOMIC_CMP_AND_SWP
||
2260 user_wr
->opcode
== IB_WR_ATOMIC_FETCH_AND_ADD
) {
2261 struct ib_atomic_wr
*atomic
;
2263 next_size
= sizeof(*atomic
);
2264 atomic
= alloc_wr(next_size
, user_wr
->num_sge
);
2270 atomic
->remote_addr
= user_wr
->wr
.atomic
.remote_addr
;
2271 atomic
->compare_add
= user_wr
->wr
.atomic
.compare_add
;
2272 atomic
->swap
= user_wr
->wr
.atomic
.swap
;
2273 atomic
->rkey
= user_wr
->wr
.atomic
.rkey
;
2276 } else if (user_wr
->opcode
== IB_WR_SEND
||
2277 user_wr
->opcode
== IB_WR_SEND_WITH_IMM
||
2278 user_wr
->opcode
== IB_WR_SEND_WITH_INV
) {
2279 next_size
= sizeof(*next
);
2280 next
= alloc_wr(next_size
, user_wr
->num_sge
);
2290 if (user_wr
->opcode
== IB_WR_SEND_WITH_IMM
||
2291 user_wr
->opcode
== IB_WR_RDMA_WRITE_WITH_IMM
) {
2293 (__be32 __force
) user_wr
->ex
.imm_data
;
2294 } else if (user_wr
->opcode
== IB_WR_SEND_WITH_INV
) {
2295 next
->ex
.invalidate_rkey
= user_wr
->ex
.invalidate_rkey
;
2305 next
->wr_id
= user_wr
->wr_id
;
2306 next
->num_sge
= user_wr
->num_sge
;
2307 next
->opcode
= user_wr
->opcode
;
2308 next
->send_flags
= user_wr
->send_flags
;
2310 if (next
->num_sge
) {
2311 next
->sg_list
= (void *) next
+
2312 ALIGN(next_size
, sizeof(struct ib_sge
));
2313 if (copy_from_user(next
->sg_list
,
2315 cmd
.wr_count
* cmd
.wqe_size
+
2316 sg_ind
* sizeof (struct ib_sge
),
2317 next
->num_sge
* sizeof (struct ib_sge
))) {
2321 sg_ind
+= next
->num_sge
;
2323 next
->sg_list
= NULL
;
2327 ret
= qp
->device
->post_send(qp
->real_qp
, wr
, &bad_wr
);
2329 for (next
= wr
; next
; next
= next
->next
) {
2335 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2339 uobj_put_obj_read(qp
);
2342 if (is_ud
&& ud_wr(wr
)->ah
)
2343 uobj_put_obj_read(ud_wr(wr
)->ah
);
2352 return ret
? ret
: in_len
;
2355 static struct ib_recv_wr
*ib_uverbs_unmarshall_recv(const char __user
*buf
,
2361 struct ib_uverbs_recv_wr
*user_wr
;
2362 struct ib_recv_wr
*wr
= NULL
, *last
, *next
;
2367 if (in_len
< wqe_size
* wr_count
+
2368 sge_count
* sizeof (struct ib_uverbs_sge
))
2369 return ERR_PTR(-EINVAL
);
2371 if (wqe_size
< sizeof (struct ib_uverbs_recv_wr
))
2372 return ERR_PTR(-EINVAL
);
2374 user_wr
= kmalloc(wqe_size
, GFP_KERNEL
);
2376 return ERR_PTR(-ENOMEM
);
2380 for (i
= 0; i
< wr_count
; ++i
) {
2381 if (copy_from_user(user_wr
, buf
+ i
* wqe_size
,
2387 if (user_wr
->num_sge
+ sg_ind
> sge_count
) {
2392 if (user_wr
->num_sge
>=
2393 (U32_MAX
- ALIGN(sizeof *next
, sizeof (struct ib_sge
))) /
2394 sizeof (struct ib_sge
)) {
2399 next
= kmalloc(ALIGN(sizeof *next
, sizeof (struct ib_sge
)) +
2400 user_wr
->num_sge
* sizeof (struct ib_sge
),
2414 next
->wr_id
= user_wr
->wr_id
;
2415 next
->num_sge
= user_wr
->num_sge
;
2417 if (next
->num_sge
) {
2418 next
->sg_list
= (void *) next
+
2419 ALIGN(sizeof *next
, sizeof (struct ib_sge
));
2420 if (copy_from_user(next
->sg_list
,
2421 buf
+ wr_count
* wqe_size
+
2422 sg_ind
* sizeof (struct ib_sge
),
2423 next
->num_sge
* sizeof (struct ib_sge
))) {
2427 sg_ind
+= next
->num_sge
;
2429 next
->sg_list
= NULL
;
2444 return ERR_PTR(ret
);
2447 ssize_t
ib_uverbs_post_recv(struct ib_uverbs_file
*file
,
2448 struct ib_device
*ib_dev
,
2449 const char __user
*buf
, int in_len
,
2452 struct ib_uverbs_post_recv cmd
;
2453 struct ib_uverbs_post_recv_resp resp
;
2454 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
2456 ssize_t ret
= -EINVAL
;
2458 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2461 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
2462 in_len
- sizeof cmd
, cmd
.wr_count
,
2463 cmd
.sge_count
, cmd
.wqe_size
);
2467 qp
= uobj_get_obj_read(qp
, UVERBS_OBJECT_QP
, cmd
.qp_handle
, file
->ucontext
);
2472 ret
= qp
->device
->post_recv(qp
->real_qp
, wr
, &bad_wr
);
2474 uobj_put_obj_read(qp
);
2476 for (next
= wr
; next
; next
= next
->next
) {
2483 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2493 return ret
? ret
: in_len
;
2496 ssize_t
ib_uverbs_post_srq_recv(struct ib_uverbs_file
*file
,
2497 struct ib_device
*ib_dev
,
2498 const char __user
*buf
, int in_len
,
2501 struct ib_uverbs_post_srq_recv cmd
;
2502 struct ib_uverbs_post_srq_recv_resp resp
;
2503 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
2505 ssize_t ret
= -EINVAL
;
2507 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2510 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
2511 in_len
- sizeof cmd
, cmd
.wr_count
,
2512 cmd
.sge_count
, cmd
.wqe_size
);
2516 srq
= uobj_get_obj_read(srq
, UVERBS_OBJECT_SRQ
, cmd
.srq_handle
, file
->ucontext
);
2521 ret
= srq
->device
->post_srq_recv(srq
, wr
, &bad_wr
);
2523 uobj_put_obj_read(srq
);
2526 for (next
= wr
; next
; next
= next
->next
) {
2532 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
))
2542 return ret
? ret
: in_len
;
2545 ssize_t
ib_uverbs_create_ah(struct ib_uverbs_file
*file
,
2546 struct ib_device
*ib_dev
,
2547 const char __user
*buf
, int in_len
,
2550 struct ib_uverbs_create_ah cmd
;
2551 struct ib_uverbs_create_ah_resp resp
;
2552 struct ib_uobject
*uobj
;
2555 struct rdma_ah_attr attr
;
2557 struct ib_udata udata
;
2559 if (out_len
< sizeof resp
)
2562 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2565 if (!rdma_is_port_valid(ib_dev
, cmd
.attr
.port_num
))
2568 ib_uverbs_init_udata(&udata
, buf
+ sizeof(cmd
),
2569 u64_to_user_ptr(cmd
.response
) + sizeof(resp
),
2570 in_len
- sizeof(cmd
) - sizeof(struct ib_uverbs_cmd_hdr
),
2571 out_len
- sizeof(resp
));
2573 uobj
= uobj_alloc(UVERBS_OBJECT_AH
, file
->ucontext
);
2575 return PTR_ERR(uobj
);
2577 pd
= uobj_get_obj_read(pd
, UVERBS_OBJECT_PD
, cmd
.pd_handle
, file
->ucontext
);
2583 attr
.type
= rdma_ah_find_type(ib_dev
, cmd
.attr
.port_num
);
2584 rdma_ah_set_make_grd(&attr
, false);
2585 rdma_ah_set_dlid(&attr
, cmd
.attr
.dlid
);
2586 rdma_ah_set_sl(&attr
, cmd
.attr
.sl
);
2587 rdma_ah_set_path_bits(&attr
, cmd
.attr
.src_path_bits
);
2588 rdma_ah_set_static_rate(&attr
, cmd
.attr
.static_rate
);
2589 rdma_ah_set_port_num(&attr
, cmd
.attr
.port_num
);
2591 if (cmd
.attr
.is_global
) {
2592 rdma_ah_set_grh(&attr
, NULL
, cmd
.attr
.grh
.flow_label
,
2593 cmd
.attr
.grh
.sgid_index
,
2594 cmd
.attr
.grh
.hop_limit
,
2595 cmd
.attr
.grh
.traffic_class
);
2596 rdma_ah_set_dgid_raw(&attr
, cmd
.attr
.grh
.dgid
);
2598 rdma_ah_set_ah_flags(&attr
, 0);
2601 ah
= rdma_create_user_ah(pd
, &attr
, &udata
);
2608 uobj
->user_handle
= cmd
.user_handle
;
2611 resp
.ah_handle
= uobj
->id
;
2613 if (copy_to_user(u64_to_user_ptr(cmd
.response
), &resp
, sizeof resp
)) {
2618 uobj_put_obj_read(pd
);
2619 uobj_alloc_commit(uobj
);
2624 rdma_destroy_ah(ah
);
2627 uobj_put_obj_read(pd
);
2630 uobj_alloc_abort(uobj
);
2634 ssize_t
ib_uverbs_destroy_ah(struct ib_uverbs_file
*file
,
2635 struct ib_device
*ib_dev
,
2636 const char __user
*buf
, int in_len
, int out_len
)
2638 struct ib_uverbs_destroy_ah cmd
;
2639 struct ib_uobject
*uobj
;
2642 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
2645 uobj
= uobj_get_write(UVERBS_OBJECT_AH
, cmd
.ah_handle
,
2648 return PTR_ERR(uobj
);
2650 ret
= uobj_remove_commit(uobj
);
2651 return ret
?: in_len
;
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp                 *qp;
        struct ib_uqp_object         *obj;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        mutex_lock(&obj->mcast_lock);
        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        ret = 0;
                        goto out_put;
                }

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast) {
                ret = -ENOMEM;
                goto out_put;
        }

        mcast->lid = cmd.mlid;
        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
        if (!ret)
                list_add_tail(&mcast->list, &obj->mcast_list);
        else
                kfree(mcast);

out_put:
        mutex_unlock(&obj->mcast_lock);
        uobj_put_obj_read(qp);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_uqp_object         *obj;
        struct ib_qp                 *qp;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret;
        bool                          found = false;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
        mutex_lock(&obj->mcast_lock);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        list_del(&mcast->list);
                        kfree(mcast);
                        found = true;
                        break;
                }

        if (!found) {
                ret = -EINVAL;
                goto out_put;
        }

        ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
        mutex_unlock(&obj->mcast_lock);
        uobj_put_obj_read(qp);
        return ret ? ret : in_len;
}
struct ib_uflow_resources {
        size_t                    max;
        size_t                    num;
        size_t                    collection_num;
        size_t                    counters_num;
        struct ib_counters      **counters;
        struct ib_flow_action   **collection;
};
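/*
 * ib_uflow_resources tracks the flow-action and counters objects that a
 * flow's specs reference; both arrays are sized for the worst case of
 * num_specs entries, and the references taken here are dropped again in
 * ib_uverbs_flow_resources_free() when the flow is destroyed.
 */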
static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
        struct ib_uflow_resources *resources;

        resources = kzalloc(sizeof(*resources), GFP_KERNEL);
        if (!resources)
                return NULL;

        resources->counters =
                kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);

        if (!resources->counters)
                goto err_cnt;

        resources->collection =
                kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);

        if (!resources->collection)
                goto err_collection;

        resources->max = num_specs;

        return resources;

err_collection:
        kfree(resources->counters);
err_cnt:
        kfree(resources);
        return NULL;
}
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
        unsigned int i;

        for (i = 0; i < uflow_res->collection_num; i++)
                atomic_dec(&uflow_res->collection[i]->usecnt);

        for (i = 0; i < uflow_res->counters_num; i++)
                atomic_dec(&uflow_res->counters[i]->usecnt);

        kfree(uflow_res->collection);
        kfree(uflow_res->counters);
        kfree(uflow_res);
}
static void flow_resources_add(struct ib_uflow_resources *uflow_res,
                               enum ib_flow_spec_type type,
                               void *ibobj)
{
        WARN_ON(uflow_res->num >= uflow_res->max);

        switch (type) {
        case IB_FLOW_SPEC_ACTION_HANDLE:
                atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
                uflow_res->collection[uflow_res->collection_num++] =
                        (struct ib_flow_action *)ibobj;
                break;
        case IB_FLOW_SPEC_ACTION_COUNT:
                atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
                uflow_res->counters[uflow_res->counters_num++] =
                        (struct ib_counters *)ibobj;
                break;
        default:
                WARN_ON(1);
        }

        uflow_res->num++;
}
static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
                                       struct ib_uverbs_flow_spec *kern_spec,
                                       union ib_flow_spec *ib_spec,
                                       struct ib_uflow_resources *uflow_res)
{
        ib_spec->type = kern_spec->type;
        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ACTION_TAG:
                if (kern_spec->flow_tag.size !=
                    sizeof(struct ib_uverbs_flow_spec_action_tag))
                        return -EINVAL;

                ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
                ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
                break;
        case IB_FLOW_SPEC_ACTION_DROP:
                if (kern_spec->drop.size !=
                    sizeof(struct ib_uverbs_flow_spec_action_drop))
                        return -EINVAL;

                ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
                break;
        case IB_FLOW_SPEC_ACTION_HANDLE:
                if (kern_spec->action.size !=
                    sizeof(struct ib_uverbs_flow_spec_action_handle))
                        return -EOPNOTSUPP;
                ib_spec->action.act = uobj_get_obj_read(flow_action,
                                                        UVERBS_OBJECT_FLOW_ACTION,
                                                        kern_spec->action.handle,
                                                        ucontext);
                if (!ib_spec->action.act)
                        return -EINVAL;
                ib_spec->action.size =
                        sizeof(struct ib_flow_spec_action_handle);
                flow_resources_add(uflow_res,
                                   IB_FLOW_SPEC_ACTION_HANDLE,
                                   ib_spec->action.act);
                uobj_put_obj_read(ib_spec->action.act);
                break;
        case IB_FLOW_SPEC_ACTION_COUNT:
                if (kern_spec->flow_count.size !=
                    sizeof(struct ib_uverbs_flow_spec_action_count))
                        return -EINVAL;
                ib_spec->flow_count.counters =
                        uobj_get_obj_read(counters,
                                          UVERBS_OBJECT_COUNTERS,
                                          kern_spec->flow_count.handle,
                                          ucontext);
                if (!ib_spec->flow_count.counters)
                        return -EINVAL;
                ib_spec->flow_count.size =
                        sizeof(struct ib_flow_spec_action_count);
                flow_resources_add(uflow_res,
                                   IB_FLOW_SPEC_ACTION_COUNT,
                                   ib_spec->flow_count.counters);
                uobj_put_obj_read(ib_spec->flow_count.counters);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
        /* Returns user space filter size, includes padding */
        return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}
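/*
 * The userspace spec carries the filter value and the filter mask back
 * to back after the header, so half of the remaining bytes belong to
 * each: e.g. a 40-byte spec with an 8-byte header yields a 16-byte
 * value region followed by a 16-byte mask region.
 */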
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
                                u16 ib_real_filter_sz)
{
        /*
         * User space filter structures must be 64 bit aligned, otherwise this
         * may pass, but we won't handle additional new attributes.
         */

        if (kern_filter_size > ib_real_filter_sz) {
                if (memchr_inv(kern_spec_filter +
                               ib_real_filter_sz, 0,
                               kern_filter_size - ib_real_filter_sz))
                        return -EINVAL;
                return ib_real_filter_sz;
        }
        return kern_filter_size;
}
int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
                                          const void *kern_spec_mask,
                                          const void *kern_spec_val,
                                          size_t kern_filter_sz,
                                          union ib_flow_spec *ib_spec)
{
        ssize_t actual_filter_sz;
        ssize_t ib_filter_sz;

        /* User flow spec size must be aligned to 4 bytes */
        if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
                return -EINVAL;

        ib_spec->type = type;

        if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
                return -EINVAL;

        switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
        case IB_FLOW_SPEC_ETH:
                ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_eth);
                memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_IPV4:
                ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
                memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_IPV6:
                ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
                memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

                if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
                    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
                        return -EINVAL;
                break;
        case IB_FLOW_SPEC_TCP:
        case IB_FLOW_SPEC_UDP:
                ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
                memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_VXLAN_TUNNEL:
                ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
                memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

                if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
                    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
                        return -EINVAL;
                break;
        case IB_FLOW_SPEC_ESP:
                ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
                memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_GRE:
                ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
                memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_MPLS:
                ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
                memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
                                       union ib_flow_spec *ib_spec)
{
        ssize_t kern_filter_sz;
        void *kern_spec_mask;
        void *kern_spec_val;

        if (kern_spec->reserved)
                return -EINVAL;

        kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);

        kern_spec_val = (void *)kern_spec +
                sizeof(struct ib_uverbs_flow_spec_hdr);
        kern_spec_mask = kern_spec_val + kern_filter_sz;

        return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
                                                     kern_spec_mask,
                                                     kern_spec_val,
                                                     kern_filter_sz, ib_spec);
}
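/*
 * kern_spec_to_ib_spec() dispatches on the spec type: action specs
 * (tag, drop, handle, count) go through kern_spec_to_ib_spec_action(),
 * while everything below IB_FLOW_SPEC_ACTION_TAG is treated as a
 * filter spec.
 */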
static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
                                struct ib_uverbs_flow_spec *kern_spec,
                                union ib_flow_spec *ib_spec,
                                struct ib_uflow_resources *uflow_res)
{
        if (kern_spec->reserved)
                return -EINVAL;

        if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
                return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
                                                   uflow_res);
        else
                return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}
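/*
 * Extended (ex_) commands are forward- and backward-compatible: each
 * handler only insists on the fields up to required_cmd_sz, rejects
 * trailing input unless it is zeroed (ib_is_udata_cleared), and reports
 * in resp.response_length how much of the response it actually filled.
 */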
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_wq       cmd = {};
        struct ib_uverbs_ex_create_wq_resp  resp = {};
        struct ib_uwq_object               *obj;
        int err = 0;
        struct ib_cq *cq;
        struct ib_pd *pd;
        struct ib_wq *wq;
        struct ib_wq_init_attr wq_init_attr = {};
        size_t required_cmd_sz;
        size_t required_resp_len;

        required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
        required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
                                                 file->ucontext);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
        if (!pd) {
                err = -EINVAL;
                goto err_uobj;
        }

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
        if (!cq) {
                err = -EINVAL;
                goto err_put_pd;
        }

        wq_init_attr.cq = cq;
        wq_init_attr.max_sge = cmd.max_sge;
        wq_init_attr.max_wr = cmd.max_wr;
        wq_init_attr.wq_context = file;
        wq_init_attr.wq_type = cmd.wq_type;
        wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
        if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
                             sizeof(cmd.create_flags)))
                wq_init_attr.create_flags = cmd.create_flags;
        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);

        if (!pd->device->create_wq) {
                err = -EOPNOTSUPP;
                goto err_put_cq;
        }
        wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
        if (IS_ERR(wq)) {
                err = PTR_ERR(wq);
                goto err_put_cq;
        }

        wq->uobject = &obj->uevent.uobject;
        obj->uevent.uobject.object = wq;
        wq->wq_type = wq_init_attr.wq_type;
        wq->cq = cq;
        wq->pd = pd;
        wq->device = pd->device;
        wq->wq_context = wq_init_attr.wq_context;
        atomic_set(&wq->usecnt, 0);
        atomic_inc(&pd->usecnt);
        atomic_inc(&cq->usecnt);
        wq->uobject = &obj->uevent.uobject;
        obj->uevent.uobject.object = wq;

        memset(&resp, 0, sizeof(resp));
        resp.wq_handle = obj->uevent.uobject.id;
        resp.max_sge = wq_init_attr.max_sge;
        resp.max_wr = wq_init_attr.max_wr;
        resp.wqn = wq->wq_num;
        resp.response_length = required_resp_len;
        err = ib_copy_to_udata(ucore,
                               &resp, resp.response_length);
        if (err)
                goto err_copy;

        uobj_put_obj_read(pd);
        uobj_put_obj_read(cq);
        uobj_alloc_commit(&obj->uevent.uobject);
        return 0;

err_copy:
        ib_destroy_wq(wq);
err_put_cq:
        uobj_put_obj_read(cq);
err_put_pd:
        uobj_put_obj_read(pd);
err_uobj:
        uobj_alloc_abort(&obj->uevent.uobject);

        return err;
}
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            struct ib_udata *ucore,
                            struct ib_udata *uhw)
{
        struct ib_uverbs_ex_destroy_wq       cmd = {};
        struct ib_uverbs_ex_destroy_wq_resp  resp = {};
        struct ib_uobject                   *uobj;
        struct ib_uwq_object                *obj;
        size_t required_cmd_sz;
        size_t required_resp_len;
        int ret;

        required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
        required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        resp.response_length = required_resp_len;
        uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
                              file->ucontext);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
        /*
         * Make sure we don't free the memory in remove_commit as we still
         * needs the uobject memory to create the response.
         */
        uverbs_uobject_get(uobj);

        ret = uobj_remove_commit(uobj);
        resp.events_reported = obj->uevent.events_reported;
        uverbs_uobject_put(uobj);
        if (ret)
                return ret;

        return ib_copy_to_udata(ucore, &resp, resp.response_length);
}
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_modify_wq cmd = {};
        struct ib_wq *wq;
        struct ib_wq_attr wq_attr = {};
        size_t required_cmd_sz;
        int ret;

        required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (!cmd.attr_mask)
                return -EINVAL;

        if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
                return -EINVAL;

        wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext);
        if (!wq)
                return -EINVAL;

        wq_attr.curr_wq_state = cmd.curr_wq_state;
        wq_attr.wq_state = cmd.wq_state;
        if (cmd.attr_mask & IB_WQ_FLAGS) {
                wq_attr.flags = cmd.flags;
                wq_attr.flags_mask = cmd.flags_mask;
        }
        if (!wq->device->modify_wq) {
                ret = -EOPNOTSUPP;
                goto out;
        }
        ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
        uobj_put_obj_read(wq);
        return ret;
}
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
                                      struct ib_device *ib_dev,
                                      struct ib_udata *ucore,
                                      struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_rwq_ind_table       cmd = {};
        struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
        struct ib_uobject                 *uobj;
        int err = 0;
        struct ib_rwq_ind_table_init_attr  init_attr = {};
        struct ib_rwq_ind_table           *rwq_ind_tbl;
        struct ib_wq                     **wqs = NULL;
        u32                               *wqs_handles = NULL;
        struct ib_wq                      *wq = NULL;
        int i, j, num_read_wqs;
        u32 num_wq_handles;
        u32 expected_in_size;
        size_t required_cmd_sz_header;
        size_t required_resp_len;

        required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
        required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

        if (ucore->inlen < required_cmd_sz_header)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
        if (err)
                return err;

        ucore->inbuf += required_cmd_sz_header;
        ucore->inlen -= required_cmd_sz_header;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
                return -EINVAL;

        num_wq_handles = 1 << cmd.log_ind_tbl_size;
        expected_in_size = num_wq_handles * sizeof(__u32);
        if (num_wq_handles == 1)
                /* input size for wq handles is u64 aligned */
                expected_in_size += sizeof(__u32);

        if (ucore->inlen < expected_in_size)
                return -EINVAL;

        if (ucore->inlen > expected_in_size &&
            !ib_is_udata_cleared(ucore, expected_in_size,
                                 ucore->inlen - expected_in_size))
                return -EOPNOTSUPP;

        wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
                              GFP_KERNEL);
        if (!wqs_handles)
                return -ENOMEM;

        err = ib_copy_from_udata(wqs_handles, ucore,
                                 num_wq_handles * sizeof(__u32));
        if (err)
                goto err_free;

        wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
        if (!wqs) {
                err = -ENOMEM;
                goto err_free;
        }

        for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
                        num_read_wqs++) {
                wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs],
                                       file->ucontext);
                if (!wq) {
                        err = -EINVAL;
                        goto put_wqs;
                }

                wqs[num_read_wqs] = wq;
        }

        uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
        if (IS_ERR(uobj)) {
                err = PTR_ERR(uobj);
                goto put_wqs;
        }

        init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
        init_attr.ind_tbl = wqs;

        if (!ib_dev->create_rwq_ind_table) {
                err = -EOPNOTSUPP;
                goto err_uobj;
        }
        rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

        if (IS_ERR(rwq_ind_tbl)) {
                err = PTR_ERR(rwq_ind_tbl);
                goto err_uobj;
        }

        rwq_ind_tbl->ind_tbl = wqs;
        rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
        rwq_ind_tbl->uobject = uobj;
        uobj->object = rwq_ind_tbl;
        rwq_ind_tbl->device = ib_dev;
        atomic_set(&rwq_ind_tbl->usecnt, 0);

        for (i = 0; i < num_wq_handles; i++)
                atomic_inc(&wqs[i]->usecnt);

        resp.ind_tbl_handle = uobj->id;
        resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
        resp.response_length = required_resp_len;

        err = ib_copy_to_udata(ucore,
                               &resp, resp.response_length);
        if (err)
                goto err_copy;

        kfree(wqs_handles);

        for (j = 0; j < num_read_wqs; j++)
                uobj_put_obj_read(wqs[j]);

        uobj_alloc_commit(uobj);
        return 0;

err_copy:
        ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
        uobj_alloc_abort(uobj);
put_wqs:
        for (j = 0; j < num_read_wqs; j++)
                uobj_put_obj_read(wqs[j]);
err_free:
        kfree(wqs_handles);
        kfree(wqs);
        return err;
}
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
                                       struct ib_device *ib_dev,
                                       struct ib_udata *ucore,
                                       struct ib_udata *uhw)
{
        struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
        struct ib_uobject      *uobj;
        int                     ret;
        size_t required_cmd_sz;

        required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
                              file->ucontext);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        return uobj_remove_commit(uobj);
}
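/*
 * Flow creation copies the variable-size ib_uverbs_flow_attr plus its
 * trailing specs from userspace, then walks kern_spec and ib_spec in
 * lockstep, converting each uverbs spec into a union ib_flow_spec and
 * accounting for every byte; leftover bytes or a short spec count fail
 * the request before the device's create_flow() is called.
 */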
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             struct ib_udata *ucore,
                             struct ib_udata *uhw)
{
        struct ib_uverbs_create_flow      cmd;
        struct ib_uverbs_create_flow_resp resp;
        struct ib_uobject                *uobj;
        struct ib_uflow_object           *uflow;
        struct ib_flow                   *flow_id;
        struct ib_uverbs_flow_attr       *kern_flow_attr;
        struct ib_flow_attr              *flow_attr;
        struct ib_qp                     *qp;
        struct ib_uflow_resources        *uflow_res;
        int err = 0;
        void *kern_spec;
        void *ib_spec;
        int i;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        if (ucore->outlen < sizeof(resp))
                return -ENOSPC;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        ucore->inbuf += sizeof(cmd);
        ucore->inlen -= sizeof(cmd);

        if (cmd.comp_mask)
                return -EINVAL;

        if (!capable(CAP_NET_RAW))
                return -EPERM;

        if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
                return -EINVAL;

        if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
             (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
                return -EINVAL;

        if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
                return -EINVAL;

        if (cmd.flow_attr.size > ucore->inlen ||
            cmd.flow_attr.size >
            (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
                return -EINVAL;

        if (cmd.flow_attr.reserved[0] ||
            cmd.flow_attr.reserved[1])
                return -EINVAL;

        if (cmd.flow_attr.num_of_specs) {
                kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
                                         GFP_KERNEL);
                if (!kern_flow_attr)
                        return -ENOMEM;

                memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
                err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
                                         cmd.flow_attr.size);
                if (err)
                        goto err_free_attr;
        } else {
                kern_flow_attr = &cmd.flow_attr;
        }

        uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
        if (IS_ERR(uobj)) {
                err = PTR_ERR(uobj);
                goto err_free_attr;
        }

        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
        if (!qp) {
                err = -EINVAL;
                goto err_uobj;
        }

        flow_attr = kzalloc(struct_size(flow_attr, flows,
                                cmd.flow_attr.num_of_specs), GFP_KERNEL);
        if (!flow_attr) {
                err = -ENOMEM;
                goto err_put;
        }
        uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
        if (!uflow_res) {
                err = -ENOMEM;
                goto err_free_flow_attr;
        }

        flow_attr->type = kern_flow_attr->type;
        flow_attr->priority = kern_flow_attr->priority;
        flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
        flow_attr->port = kern_flow_attr->port;
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);

        kern_spec = kern_flow_attr + 1;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs &&
             cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
             cmd.flow_attr.size >=
             ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
                err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
                                           uflow_res);
                if (err)
                        goto err_free;

                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
                cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
                kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
                pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
                        i, cmd.flow_attr.size);
                err = -EINVAL;
                goto err_free;
        }

        flow_id = qp->device->create_flow(qp, flow_attr,
                                          IB_FLOW_DOMAIN_USER, uhw);

        if (IS_ERR(flow_id)) {
                err = PTR_ERR(flow_id);
                goto err_free;
        }
        atomic_inc(&qp->usecnt);

        flow_id->qp = qp;
        flow_id->uobject = uobj;
        uobj->object = flow_id;
        uflow = container_of(uobj, typeof(*uflow), uobject);
        uflow->resources = uflow_res;

        memset(&resp, 0, sizeof(resp));
        resp.flow_handle = uobj->id;

        err = ib_copy_to_udata(ucore,
                               &resp, sizeof(resp));
        if (err)
                goto err_copy;

        uobj_put_obj_read(qp);
        uobj_alloc_commit(uobj);
        kfree(flow_attr);
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return 0;
err_copy:
        ib_destroy_flow(flow_id);
err_free:
        ib_uverbs_flow_resources_free(uflow_res);
err_free_flow_attr:
        kfree(flow_attr);
err_put:
        uobj_put_obj_read(qp);
err_uobj:
        uobj_alloc_abort(uobj);
err_free_attr:
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return err;
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              struct ib_udata *ucore,
                              struct ib_udata *uhw)
{
        struct ib_uverbs_destroy_flow  cmd;
        struct ib_uobject             *uobj;
        int                            ret;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EINVAL;

        uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
                              file->ucontext);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        ret = uobj_remove_commit(uobj);
        return ret;
}
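/*
 * __uverbs_create_xsrq() is the shared creation path for all SRQ types:
 * basic SRQs only need a PD, XRC SRQs additionally reference an XRCD,
 * and both XRC and tag-matching SRQs (ib_srq_has_cq()) take a
 * completion queue; tag-matching SRQs also pass max_num_tags through to
 * the driver.
 */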
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_device *ib_dev,
                                struct ib_uverbs_create_xsrq *cmd,
                                struct ib_udata *udata)
{
        struct ib_uverbs_create_srq_resp resp;
        struct ib_usrq_object           *obj;
        struct ib_pd                    *pd;
        struct ib_srq                   *srq;
        struct ib_uobject               *uninitialized_var(xrcd_uobj);
        struct ib_srq_init_attr          attr;
        int ret;

        obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
                                                  file->ucontext);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (cmd->srq_type == IB_SRQT_TM)
                attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

        if (cmd->srq_type == IB_SRQT_XRC) {
                xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
                                          file->ucontext);
                if (IS_ERR(xrcd_uobj)) {
                        ret = -EINVAL;
                        goto err;
                }

                attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
                if (!attr.ext.xrc.xrcd) {
                        ret = -EINVAL;
                        goto err_put_xrcd;
                }

                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
                atomic_inc(&obj->uxrcd->refcnt);
        }

        if (ib_srq_has_cq(cmd->srq_type)) {
                attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle,
                                                file->ucontext);
                if (!attr.ext.cq) {
                        ret = -EINVAL;
                        goto err_put_xrcd;
                }
        }

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_put_cq;
        }

        attr.event_handler  = ib_uverbs_srq_event_handler;
        attr.srq_context    = file;
        attr.srq_type       = cmd->srq_type;
        attr.attr.max_wr    = cmd->max_wr;
        attr.attr.max_sge   = cmd->max_sge;
        attr.attr.srq_limit = cmd->srq_limit;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);

        srq = pd->device->create_srq(pd, &attr, udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_put;
        }

        srq->device        = pd->device;
        srq->pd            = pd;
        srq->srq_type      = cmd->srq_type;
        srq->uobject       = &obj->uevent.uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context   = attr.srq_context;

        if (ib_srq_has_cq(cmd->srq_type)) {
                srq->ext.cq = attr.ext.cq;
                atomic_inc(&attr.ext.cq->usecnt);
        }

        if (cmd->srq_type == IB_SRQT_XRC) {
                srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
                atomic_inc(&attr.ext.xrc.xrcd->usecnt);
        }

        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        obj->uevent.uobject.object = srq;
        obj->uevent.uobject.user_handle = cmd->user_handle;

        memset(&resp, 0, sizeof resp);
        resp.srq_handle = obj->uevent.uobject.id;
        resp.max_wr     = attr.attr.max_wr;
        resp.max_sge    = attr.attr.max_sge;
        if (cmd->srq_type == IB_SRQT_XRC)
                resp.srqn = srq->ext.xrc.srq_num;

        if (copy_to_user(u64_to_user_ptr(cmd->response),
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (cmd->srq_type == IB_SRQT_XRC)
                uobj_put_read(xrcd_uobj);

        if (ib_srq_has_cq(cmd->srq_type))
                uobj_put_obj_read(attr.ext.cq);

        uobj_put_obj_read(pd);
        uobj_alloc_commit(&obj->uevent.uobject);

        return 0;

err_copy:
        ib_destroy_srq(srq);

err_put:
        uobj_put_obj_read(pd);

err_put_cq:
        if (ib_srq_has_cq(cmd->srq_type))
                uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
        if (cmd->srq_type == IB_SRQT_XRC) {
                atomic_dec(&obj->uxrcd->refcnt);
                uobj_put_read(xrcd_uobj);
        }

err:
        uobj_alloc_abort(&obj->uevent.uobject);
        return ret;
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_create_srq      cmd;
        struct ib_uverbs_create_xsrq     xcmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata                  udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&xcmd, 0, sizeof(xcmd));
        xcmd.response    = cmd.response;
        xcmd.user_handle = cmd.user_handle;
        xcmd.srq_type    = IB_SRQT_BASIC;
        xcmd.pd_handle   = cmd.pd_handle;
        xcmd.max_wr      = cmd.max_wr;
        xcmd.max_sge     = cmd.max_sge;
        xcmd.srq_limit   = cmd.srq_limit;

        ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
                   u64_to_user_ptr(cmd.response) + sizeof(resp),
                   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                   out_len - sizeof(resp));

        ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
        if (ret)
                return ret;

        return in_len;
}
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_create_xsrq     cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata                  udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
                   u64_to_user_ptr(cmd.response) + sizeof(resp),
                   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
                   out_len - sizeof(resp));

        ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
        if (ret)
                return ret;

        return in_len;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_udata             udata;
        struct ib_srq              *srq;
        struct ib_srq_attr          attr;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
                   out_len);

        srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        attr.max_wr    = cmd.max_wr;
        attr.srq_limit = cmd.srq_limit;

        ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

        uobj_put_obj_read(srq);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf,
                            int in_len, int out_len)
{
        struct ib_uverbs_query_srq      cmd;
        struct ib_uverbs_query_srq_resp resp;
        struct ib_srq_attr              attr;
        struct ib_srq                   *srq;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        ret = ib_query_srq(srq, &attr);

        uobj_put_obj_read(srq);

        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.max_wr    = attr.max_wr;
        resp.max_sge   = attr.max_sge;
        resp.srq_limit = attr.srq_limit;

        if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_destroy_srq      cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_uobject                *uobj;
        struct ib_uevent_object          *obj;
        int                               ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
                              file->ucontext);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        obj = container_of(uobj, struct ib_uevent_object, uobject);
        /*
         * Make sure we don't free the memory in remove_commit as we still
         * needs the uobject memory to create the response.
         */
        uverbs_uobject_get(uobj);

        memset(&resp, 0, sizeof(resp));

        ret = uobj_remove_commit(uobj);
        if (ret) {
                uverbs_uobject_put(uobj);
                return ret;
        }

        resp.events_reported = obj->events_reported;
        uverbs_uobject_put(uobj);
        if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
                return -EFAULT;

        return in_len;
}
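/*
 * Extended device query grows resp.response_length one capability block
 * at a time, stopping as soon as the next block would no longer fit in
 * ucore->outlen; older userspace therefore simply receives a shorter,
 * still valid response.
 */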
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              struct ib_udata *ucore,
                              struct ib_udata *uhw)
{
        struct ib_uverbs_ex_query_device_resp resp = { {0} };
        struct ib_uverbs_ex_query_device  cmd;
        struct ib_device_attr attr = {0};
        int err;

        if (!ib_dev->query_device)
                return -EOPNOTSUPP;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        resp.response_length = offsetof(typeof(resp), odp_caps);

        if (ucore->outlen < resp.response_length)
                return -ENOSPC;

        err = ib_dev->query_device(ib_dev, &attr, uhw);
        if (err)
                return err;

        copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

        if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
                goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        resp.odp_caps.general_caps = attr.odp_caps.general_caps;
        resp.odp_caps.per_transport_caps.rc_odp_caps =
                attr.odp_caps.per_transport_caps.rc_odp_caps;
        resp.odp_caps.per_transport_caps.uc_odp_caps =
                attr.odp_caps.per_transport_caps.uc_odp_caps;
        resp.odp_caps.per_transport_caps.ud_odp_caps =
                attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
        resp.response_length += sizeof(resp.odp_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
                goto end;

        resp.timestamp_mask = attr.timestamp_mask;
        resp.response_length += sizeof(resp.timestamp_mask);

        if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
                goto end;

        resp.hca_core_clock = attr.hca_core_clock;
        resp.response_length += sizeof(resp.hca_core_clock);

        if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
                goto end;

        resp.device_cap_flags_ex = attr.device_cap_flags;
        resp.response_length += sizeof(resp.device_cap_flags_ex);

        if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
                goto end;

        resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
        resp.rss_caps.max_rwq_indirection_tables =
                attr.rss_caps.max_rwq_indirection_tables;
        resp.rss_caps.max_rwq_indirection_table_size =
                attr.rss_caps.max_rwq_indirection_table_size;

        resp.response_length += sizeof(resp.rss_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
                goto end;

        resp.max_wq_type_rq = attr.max_wq_type_rq;
        resp.response_length += sizeof(resp.max_wq_type_rq);

        if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
                goto end;

        resp.raw_packet_caps = attr.raw_packet_caps;
        resp.response_length += sizeof(resp.raw_packet_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
                goto end;

        resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
        resp.tm_caps.max_num_tags      = attr.tm_caps.max_num_tags;
        resp.tm_caps.max_ops           = attr.tm_caps.max_ops;
        resp.tm_caps.max_sge           = attr.tm_caps.max_sge;
        resp.tm_caps.flags             = attr.tm_caps.flags;
        resp.response_length += sizeof(resp.tm_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
                goto end;

        resp.cq_moderation_caps.max_cq_moderation_count =
                attr.cq_caps.max_cq_moderation_count;
        resp.cq_moderation_caps.max_cq_moderation_period =
                attr.cq_caps.max_cq_moderation_period;
        resp.response_length += sizeof(resp.cq_moderation_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size))
                goto end;

        resp.max_dm_size = attr.max_dm_size;
        resp.response_length += sizeof(resp.max_dm_size);
end:
        err = ib_copy_to_udata(ucore, &resp, resp.response_length);
        if (err)
                return err;

        return 0;
}
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_modify_cq cmd = {};
        struct ib_cq *cq;
        size_t required_cmd_sz;
        int ret;

        required_cmd_sz = offsetof(typeof(cmd), reserved) +
                                sizeof(cmd.reserved);
        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (!cmd.attr_mask || cmd.reserved)
                return -EINVAL;

        if (cmd.attr_mask > IB_CQ_MODERATE)
                return -EOPNOTSUPP;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
        if (!cq)
                return -EINVAL;

        ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

        uobj_put_obj_read(cq);