/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};
static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state	     = attr.phys_state;
	resp.link_layer	     = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	struct ib_xrcd             *xrcd = NULL;
	struct inode               *inode = NULL;
	struct ib_uxrcd_object     *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id          = wc->wr_id;
	tmp.status         = wc->status;
	tmp.opcode         = wc->opcode;
	tmp.vendor_err     = wc->vendor_err;
	tmp.byte_len       = wc->byte_len;
	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num         = wc->qp->qp_num;
	tmp.src_qp         = wc->src_qp;
	tmp.wc_flags       = wc->wc_flags;
	tmp.pkey_index     = wc->pkey_index;
	tmp.slid           = wc->slid;
	tmp.sl             = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num       = wc->port_num;
	tmp.reserved       = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	struct ib_uverbs_event_file     *ev_file;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device               *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd                 *xrcd = NULL;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd          = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp       = qp;
		qp->device        = device;
		qp->pd            = pd;
		qp->send_cq       = attr.send_cq;
		qp->recv_cq       = attr.recv_cq;
		qp->srq           = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context    = attr.qp_context;
		qp->qp_type       = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid          = attr->ah_attr.dlid;
	resp.dest.sl            = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate   = attr->ah_attr.static_rate;
	resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num      = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr     = init_attr->cap.max_send_wr;
	resp.max_recv_wr     = init_attr->cap.max_recv_wr;
	resp.max_send_sge    = init_attr->cap.max_send_sge;
	resp.max_recv_sge    = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY | IB_QP_MIN_RNR_TIMER);
	default:
		return mask;
	}
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid              = cmd.dest.dlid;
	attr->ah_attr.sl                = cmd.dest.sl;
	attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate       = cmd.dest.static_rate;
	attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num          = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah               *ah;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Walk the variable-size spec array appended to the command. */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
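/*
 * Extended DESTROY_FLOW handler: look up the rule uobject by handle,
 * destroy the underlying flow, and unlink the uobject from the
 * per-context rule list before dropping the last reference.
 */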
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
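/*
 * Common worker for the CREATE_SRQ and CREATE_XSRQ paths.  For
 * IB_SRQT_XRC the XRCD and CQ named in the command are looked up and
 * held across the device's create_srq call; their read references are
 * dropped only after the response has been copied back to user space.
 */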
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
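/*
 * Legacy CREATE_SRQ entry point: repackage the command as a
 * struct ib_uverbs_create_xsrq with srq_type IB_SRQT_BASIC and reuse
 * __uverbs_create_xsrq() above.
 */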
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
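/*
 * CREATE_XSRQ entry point: the command already carries the SRQ type
 * and (for XRC) the XRCD handle, so it is passed through to
 * __uverbs_create_xsrq() unchanged.
 */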
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
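/*
 * MODIFY_SRQ: the command carries only max_wr and srq_limit;
 * cmd.attr_mask tells the device which of the two the caller
 * actually intends to change.
 */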
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
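/*
 * QUERY_SRQ: read back the current max_wr, max_sge and srq_limit via
 * ib_query_srq() and copy them to the user's response buffer.
 */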
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
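/*
 * DESTROY_SRQ: tear down the SRQ, drop the XRCD reference taken at
 * creation time for XRC SRQs, and report the number of asynchronous
 * events delivered on the object back to user space.
 */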
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}