/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"
struct uverbs_lock_class {
        struct lock_class_key   key;
        char                    name[16];
};
static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
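
/*
 * A minimal sketch of the read-side sequence this scheme implies,
 * using the helpers defined below (illustrative only):
 *
 *      uobj = idr_read_uobj(idr, handle, context, 0);
 *      if (!uobj)
 *              return -EINVAL;         (stale handle, wrong context,
 *                                       or object no longer live)
 *      ... use uobj->object under the read side of uobj->mutex ...
 *      put_uobj_read(uobj);            (drops the rwsem and the kref)
 */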
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
        uobj->user_handle = user_handle;
        uobj->context     = context;
        kref_init(&uobj->ref);
        init_rwsem(&uobj->mutex);
        lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
        uobj->live = 0;
}
static void release_uobj(struct kref *kref)
{
        kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
        kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
        up_read(&uobj->mutex);
        put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
        up_write(&uobj->mutex);
        put_uobj(uobj);
}
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);

        ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&ib_uverbs_idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}
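
/*
 * Note: idr_preload()/idr_alloc(..., GFP_NOWAIT) is the standard idr
 * idiom for allocating an id while holding a spinlock; the GFP_KERNEL
 * preallocation happens before ib_uverbs_idr_lock is taken, so the
 * allocation under the lock never sleeps.
 */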
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        spin_lock(&ib_uverbs_idr_lock);
        idr_remove(idr, uobj->id);
        spin_unlock(&ib_uverbs_idr_lock);
}
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        spin_lock(&ib_uverbs_idr_lock);
        uobj = idr_find(idr, id);
        if (uobj) {
                if (uobj->context == context)
                        kref_get(&uobj->ref);
                else
                        uobj = NULL;
        }
        spin_unlock(&ib_uverbs_idr_lock);

        return uobj;
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        if (nested)
                down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
        else
                down_read(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_read(uobj);
                return NULL;
        }

        return uobj;
}
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        down_write(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_write(uobj);
                return NULL;
        }

        return uobj;
}
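
/*
 * idr_read_uobj() vs idr_write_uobj(): readers (verbs that merely use
 * an object) take uobj->mutex shared and may run concurrently; writers
 * (destroy paths) take it exclusively.  Both re-check uobj->live under
 * the lock, so a lookup that races with destruction fails cleanly.
 */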
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
        struct ib_uobject *uobj;

        uobj = idr_read_uobj(idr, id, context, nested);
        return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
        put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
        put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
        put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
        return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
        put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
        put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
        put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
        *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
        return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
        put_uobj_read(uobj);
}
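
/*
 * The typed wrappers above encode the lookup convention used by every
 * handler in this file; for example (illustrative only):
 *
 *      pd = idr_read_pd(cmd.pd_handle, file->ucontext);
 *      if (!pd)
 *              return -EINVAL;
 *      ... use pd ...
 *      put_pd_read(pd);
 */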
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context      cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata                   udata;
        struct ib_device                 *ibdev = file->device->ib_dev;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct ib_device_attr             dev_attr;
#endif
        struct ib_ucontext               *ucontext;
        struct file                      *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ibdev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);
        INIT_LIST_HEAD(&ucontext->xrcd_list);
        INIT_LIST_HEAD(&ucontext->rule_list);
        ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        ucontext->umem_tree = RB_ROOT;
        init_rwsem(&ucontext->umem_rwsem);
        ucontext->odp_mrs_count = 0;
        INIT_LIST_HEAD(&ucontext->no_private_counters);

        ret = ib_query_device(ibdev, &dev_attr);
        if (ret)
                goto err_free;
        if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
                ucontext->invalidate_range = NULL;
#endif

        resp.num_comp_vectors = file->device->num_comp_vectors;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                goto err_free;
        resp.async_fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_fd;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->async_file = filp->private_data;

        INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
                              ib_uverbs_event_handler);
        ret = ib_register_event_handler(&file->event_handler);
        if (ret)
                goto err_file;

        kref_get(&file->async_file->ref);
        kref_get(&file->ref);
        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        fput(filp);

err_fd:
        put_unused_fd(resp.async_fd);

err_free:
        put_pid(ucontext->tgid);
        ibdev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}
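
/*
 * Note the calling convention shared by every write()-based handler in
 * this file: the command struct is copied in from "buf", any response
 * is written through the user pointer carried in cmd.response, and the
 * handler returns in_len on success so the write() appears fully
 * consumed.
 */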
static void copy_query_dev_fields(struct ib_uverbs_file *file,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        resp->fw_ver                    = attr->fw_ver;
        resp->node_guid                 = file->device->ib_dev->node_guid;
        resp->sys_image_guid            = attr->sys_image_guid;
        resp->max_mr_size               = attr->max_mr_size;
        resp->page_size_cap             = attr->page_size_cap;
        resp->vendor_id                 = attr->vendor_id;
        resp->vendor_part_id            = attr->vendor_part_id;
        resp->hw_ver                    = attr->hw_ver;
        resp->max_qp                    = attr->max_qp;
        resp->max_qp_wr                 = attr->max_qp_wr;
        resp->device_cap_flags          = attr->device_cap_flags;
        resp->max_sge                   = attr->max_sge;
        resp->max_sge_rd                = attr->max_sge_rd;
        resp->max_cq                    = attr->max_cq;
        resp->max_cqe                   = attr->max_cqe;
        resp->max_mr                    = attr->max_mr;
        resp->max_pd                    = attr->max_pd;
        resp->max_qp_rd_atom            = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom            = attr->max_ee_rd_atom;
        resp->max_res_rd_atom           = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
        resp->atomic_cap                = attr->atomic_cap;
        resp->max_ee                    = attr->max_ee;
        resp->max_rdd                   = attr->max_rdd;
        resp->max_mw                    = attr->max_mw;
        resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
        resp->max_mcast_grp             = attr->max_mcast_grp;
        resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah                    = attr->max_ah;
        resp->max_fmr                   = attr->max_fmr;
        resp->max_map_per_fmr           = attr->max_map_per_fmr;
        resp->max_srq                   = attr->max_srq;
        resp->max_srq_wr                = attr->max_srq_wr;
        resp->max_srq_sge               = attr->max_srq_sge;
        resp->max_pkeys                 = attr->max_pkeys;
        resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
        resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr              attr;
        int                                ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(file, &resp, &attr);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state           = attr.state;
        resp.max_mtu         = attr.max_mtu;
        resp.active_mtu      = attr.active_mtu;
        resp.gid_tbl_len     = attr.gid_tbl_len;
        resp.port_cap_flags  = attr.port_cap_flags;
        resp.max_msg_sz      = attr.max_msg_sz;
        resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
        resp.pkey_tbl_len    = attr.pkey_tbl_len;
        resp.lid             = attr.lid;
        resp.sm_lid          = attr.sm_lid;
        resp.lmc             = attr.lmc;
        resp.max_vl_num      = attr.max_vl_num;
        resp.sm_sl           = attr.sm_sl;
        resp.subnet_timeout  = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;
        resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
                                                        cmd.port_num);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata                udata;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
        down_write(&uobj->mutex);

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device  = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        uobj->object = pd;
        ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
        ib_dealloc_pd(pd);

err:
        put_uobj_write(uobj);
        return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject          *uobj;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        ret = ib_dealloc_pd(uobj->object);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}
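
/*
 * Conversely, the "destroy" template shown above: look the uobject up
 * with a write lock, destroy the hardware object, clear live on
 * success, drop the lock, then remove the idr entry and list linkage
 * and drop the final reference with put_uobj().
 */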
struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct inode *inode,
                             struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        return 0;
}
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}
static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}
static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}
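
/*
 * The rbtree above maps an inode to its ib_xrcd so that processes
 * opening the same file share a single XRC domain; xrcd->usecnt then
 * counts the user handles attached to a shared entry.
 */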
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_udata                 udata;
        struct ib_uxrcd_object        *obj;
        struct ib_xrcd                *xrcd = NULL;
        struct fd                       f = {NULL, 0};
        struct inode                  *inode = NULL;
        int                             ret = 0;
        int                             new_xrcd = 0;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        mutex_lock(&file->device->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;
                goto err_tree_mutex_unlock;
        }

        init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

        down_write(&obj->uobject.mutex);

        if (!xrcd) {
                xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
                                                        file->ucontext, &udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->inode  = inode;
                xrcd->device = file->device->ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(file->device, inode, xrcd);
                        if (ret)
                                goto err_insert_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (f.file)
                fdput(f);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;
        up_write(&obj->uobject.mutex);

        mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;

err_copy:
        if (inode) {
                if (new_xrcd)
                        xrcd_table_delete(file->device, inode);
                atomic_dec(&xrcd->usecnt);
        }

err_insert_xrcd:
        idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
        ib_dealloc_xrcd(xrcd);

err:
        put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&file->device->xrcd_tree_mutex);

        return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_close_xrcd cmd;
        struct ib_uobject          *uobj;
        struct ib_xrcd             *xrcd = NULL;
        struct inode               *inode = NULL;
        struct ib_uxrcd_object     *obj;
        int                         live;
        int                         ret = 0;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->device->xrcd_tree_mutex);
        uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
        if (!uobj) {
                ret = -EINVAL;
                goto out;
        }

        xrcd  = uobj->object;
        inode = xrcd->inode;
        obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
        if (atomic_read(&obj->refcnt)) {
                put_uobj_write(uobj);
                ret = -EBUSY;
                goto out;
        }

        if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
                ret = ib_dealloc_xrcd(uobj->object);
                if (!ret)
                        uobj->live = 0;
        }

        live = uobj->live;
        if (inode && ret)
                atomic_inc(&xrcd->usecnt);

        put_uobj_write(uobj);

        if (ret)
                goto out;

        if (inode && !live)
                xrcd_table_delete(file->device, inode);

        idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        ret = in_len;

out:
        mutex_unlock(&file->device->xrcd_tree_mutex);
        return ret;
}
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
        struct inode *inode;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return;

        ib_dealloc_xrcd(xrcd);

        if (inode)
                xrcd_table_delete(dev, inode);
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                struct ib_device_attr attr;

                ret = ib_query_device(pd->device, &attr);
                if (ret || !(attr.device_cap_flags &
                                IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        uobj->object = mr;
        ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
        if (ret)
                goto err_unreg;

        memset(&resp, 0, sizeof resp);
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;
        resp.mr_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
        ib_dereg_mr(mr);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_udata                udata;
        struct ib_pd                  *pd = NULL;
        struct ib_mr                  *mr;
        struct ib_pd                  *old_pd;
        int                            ret;
        struct ib_uobject             *uobj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof(cmd),
                   (unsigned long) cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                        return -EINVAL;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        if (atomic_read(&mr->usecnt)) {
                ret = -EBUSY;
                goto put_uobj_pd;
        }

        old_pd = mr->pd;
        ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                        cmd.length, cmd.hca_va,
                                        cmd.access_flags, pd, &udata);
        if (!ret) {
                if (cmd.flags & IB_MR_REREG_PD) {
                        atomic_inc(&pd->usecnt);
                        mr->pd = pd;
                        atomic_dec(&old_pd->usecnt);
                }
        } else {
                goto put_uobj_pd;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
        else
                ret = in_len;

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                put_pd_read(pd);

put_uobjs:
        put_uobj_write(mr->uobject);

        return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr             *mr;
        struct ib_uobject        *uobj;
        int                       ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        ret = ib_dereg_mr(mr);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device  = pd->device;
        mw->pd      = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
        if (ret)
                goto err_unalloc;

        memset(&resp, 0, sizeof(resp));
        resp.rkey      = mw->rkey;
        resp.mw_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mw_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
        ib_dealloc_mw(mw);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_dealloc_mw cmd;
        struct ib_mw               *mw;
        struct ib_uobject          *uobj;
        int                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mw = uobj->object;

        ret = ib_dealloc_mw(mw);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct file                               *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                return ret;
        resp.fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 0);
        if (IS_ERR(filp)) {
                put_unused_fd(resp.fd);
                return PTR_ERR(filp);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_ucq_object           *obj;
        struct ib_uverbs_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.comp_vector >= file->device->num_comp_vectors)
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
        down_write(&obj->uobject.mutex);

        if (cmd.comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        obj->uverbs_file           = file;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             cmd.comp_vector,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_file;
        }

        cq->device        = file->device->ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file;
        atomic_set(&cq->usecnt, 0);

        obj->uobject.object = cq;
        ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
        if (ret)
                goto err_free;

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = obj->uobject.id;
        resp.cqe       = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;

        up_write(&obj->uobject.mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
        ib_destroy_cq(cq);

err_file:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, obj);

err:
        put_uobj_write(&obj->uobject);
        return ret;
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_cq                   *cq;
        int                             ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp.cqe))
                ret = -EFAULT;

out:
        put_cq_read(cq);

        return ret ? ret : in_len;
}
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id          = wc->wr_id;
        tmp.status         = wc->status;
        tmp.opcode         = wc->opcode;
        tmp.vendor_err     = wc->vendor_err;
        tmp.byte_len       = wc->byte_len;
        tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
        tmp.qp_num         = wc->qp->qp_num;
        tmp.src_qp         = wc->src_qp;
        tmp.wc_flags       = wc->wc_flags;
        tmp.pkey_index     = wc->pkey_index;
        tmp.slid           = wc->slid;
        tmp.sl             = wc->sl;
        tmp.dlid_path_bits = wc->dlid_path_bits;
        tmp.port_num       = wc->port_num;
        tmp.reserved       = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}
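
/*
 * copy_wc_to_user() marshals the kernel's struct ib_wc into the fixed
 * userspace ABI struct ib_uverbs_wc field by field rather than copying
 * the kernel struct wholesale, so internal layout changes (for example
 * the qp pointer, flattened here to qp_num) never leak to user space.
 */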
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len,
                          int out_len)
{
        struct ib_uverbs_poll_cq       cmd;
        struct ib_uverbs_poll_cq_resp  resp;
        u8 __user                     *header_ptr;
        u8 __user                     *data_ptr;
        struct ib_cq                  *cq;
        struct ib_wc                   wc;
        int                            ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        /* we copy a struct ib_uverbs_poll_cq_resp to user space */
        header_ptr = (void __user *)(unsigned long) cmd.response;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }

        ret = in_len;

out_put:
        put_cq_read(cq);
        return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq                  *cq;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        put_cq_read(cq);

        return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject               *uobj;
        struct ib_cq                    *cq;
        struct ib_ucq_object            *obj;
        struct ib_uverbs_event_file     *ev_file;
        int                              ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        cq      = uobj->object;
        ev_file = cq->cq_context;
        obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

        ret = ib_destroy_cq(cq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_ucq(file, ev_file, obj);

        memset(&resp, 0, sizeof resp);
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_qp      cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata                 udata;
        struct ib_uqp_object           *obj;
        struct ib_device               *device;
        struct ib_pd                   *pd = NULL;
        struct ib_xrcd                 *xrcd = NULL;
        struct ib_uobject              *uninitialized_var(xrcd_uobj);
        struct ib_cq                   *scq = NULL, *rcq = NULL;
        struct ib_srq                  *srq = NULL;
        struct ib_qp                   *qp;
        struct ib_qp_init_attr          attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
                return -EPERM;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        obj = kzalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        if (cmd.qp_type == IB_QPT_XRC_TGT) {
                xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
                if (!xrcd) {
                        ret = -EINVAL;
                        goto err_put;
                }
                device = xrcd->device;
        } else {
                if (cmd.qp_type == IB_QPT_XRC_INI) {
                        cmd.max_recv_wr = cmd.max_recv_sge = 0;
                } else {
                        if (cmd.is_srq) {
                                srq = idr_read_srq(cmd.srq_handle, file->ucontext);
                                if (!srq || srq->srq_type != IB_SRQT_BASIC) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }

                        if (cmd.recv_cq_handle != cmd.send_cq_handle) {
                                rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
                                if (!rcq) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }
                }

                scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
                rcq = rcq ?: scq;
                pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd || !scq) {
                        ret = -EINVAL;
                        goto err_put;
                }

                device = pd->device;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.xrcd          = xrcd;
        attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd.qp_type;
        attr.create_flags  = 0;

        attr.cap.max_send_wr     = cmd.max_send_wr;
        attr.cap.max_recv_wr     = cmd.max_recv_wr;
        attr.cap.max_send_sge    = cmd.max_send_sge;
        attr.cap.max_recv_sge    = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        if (cmd.qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
                qp = device->create_qp(pd, &attr, &udata);

        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        if (cmd.qp_type != IB_QPT_XRC_TGT) {
                qp->real_qp       = qp;
                qp->device        = device;
                qp->pd            = pd;
                qp->send_cq       = attr.send_cq;
                qp->recv_cq       = attr.recv_cq;
                qp->srq           = attr.srq;
                qp->event_handler = attr.event_handler;
                qp->qp_context    = attr.qp_context;
                qp->qp_type       = attr.qp_type;
                atomic_set(&qp->usecnt, 0);
                atomic_inc(&pd->usecnt);
                atomic_inc(&attr.send_cq->usecnt);
                if (attr.recv_cq)
                        atomic_inc(&attr.recv_cq->usecnt);
                if (attr.srq)
                        atomic_inc(&attr.srq->usecnt);
        }
        qp->uobject = &obj->uevent.uobject;

        obj->uevent.uobject.object = qp;
        ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.qpn             = qp->qp_num;
        resp.qp_handle       = obj->uevent.uobject.id;
        resp.max_recv_sge    = attr.cap.max_recv_sge;
        resp.max_send_sge    = attr.cap.max_send_sge;
        resp.max_recv_wr     = attr.cap.max_recv_wr;
        resp.max_send_wr     = attr.cap.max_send_wr;
        resp.max_inline_data = attr.cap.max_inline_data;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (xrcd) {
                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
                                          uobject);
                atomic_inc(&obj->uxrcd->refcnt);
                put_xrcd_read(xrcd_uobj);
        }

        if (pd)
                put_pd_read(pd);
        if (scq)
                put_cq_read(scq);
        if (rcq && rcq != scq)
                put_cq_read(rcq);
        if (srq)
                put_srq_read(srq);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_qp(qp);

err_put:
        if (xrcd)
                put_xrcd_read(xrcd_uobj);
        if (pd)
                put_pd_read(pd);
        if (scq)
                put_cq_read(scq);
        if (rcq && rcq != scq)
                put_cq_read(rcq);
        if (srq)
                put_srq_read(srq);

        put_uobj_write(&obj->uevent.uobject);
        return ret;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_open_qp        cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata                 udata;
        struct ib_uqp_object           *obj;
        struct ib_xrcd                 *xrcd;
        struct ib_uobject              *uninitialized_var(xrcd_uobj);
        struct ib_qp                   *qp;
        struct ib_qp_open_attr          attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
        if (!xrcd) {
                ret = -EINVAL;
                goto err_put;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.qp_num        = cmd.qpn;
        attr.qp_type       = cmd.qp_type;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        qp = ib_open_qp(xrcd, &attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        qp->uobject = &obj->uevent.uobject;

        obj->uevent.uobject.object = qp;
        ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.qpn       = qp->qp_num;
        resp.qp_handle = obj->uevent.uobject.id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_remove;
        }

        obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
        atomic_inc(&obj->uxrcd->refcnt);
        put_xrcd_read(xrcd_uobj);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return in_len;

err_remove:
        idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_qp(qp);

err_put:
        put_xrcd_read(xrcd_uobj);
        put_uobj_write(&obj->uevent.uobject);
        return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_query_qp      cmd;
        struct ib_uverbs_query_qp_resp resp;
        struct ib_qp                  *qp;
        struct ib_qp_attr             *attr;
        struct ib_qp_init_attr        *init_attr;
        int                            ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr      = kmalloc(sizeof *attr, GFP_KERNEL);
        init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
        if (!attr || !init_attr) {
                ret = -ENOMEM;
                goto out;
        }

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                ret = -EINVAL;
                goto out;
        }

        ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

        put_qp_read(qp);

        if (ret)
                goto out;

        memset(&resp, 0, sizeof resp);

        resp.qp_state           = attr->qp_state;
        resp.cur_qp_state       = attr->cur_qp_state;
        resp.path_mtu           = attr->path_mtu;
        resp.path_mig_state     = attr->path_mig_state;
        resp.qkey               = attr->qkey;
        resp.rq_psn             = attr->rq_psn;
        resp.sq_psn             = attr->sq_psn;
        resp.dest_qp_num        = attr->dest_qp_num;
        resp.qp_access_flags    = attr->qp_access_flags;
        resp.pkey_index         = attr->pkey_index;
        resp.alt_pkey_index     = attr->alt_pkey_index;
        resp.sq_draining        = attr->sq_draining;
        resp.max_rd_atomic      = attr->max_rd_atomic;
        resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
        resp.min_rnr_timer      = attr->min_rnr_timer;
        resp.port_num           = attr->port_num;
        resp.timeout            = attr->timeout;
        resp.retry_cnt          = attr->retry_cnt;
        resp.rnr_retry          = attr->rnr_retry;
        resp.alt_port_num       = attr->alt_port_num;
        resp.alt_timeout        = attr->alt_timeout;

        memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
        resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
        resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
        resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
        resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
        resp.dest.dlid          = attr->ah_attr.dlid;
        resp.dest.sl            = attr->ah_attr.sl;
        resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
        resp.dest.static_rate   = attr->ah_attr.static_rate;
        resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
        resp.dest.port_num      = attr->ah_attr.port_num;

        memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
        resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
        resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
        resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
        resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
        resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
        resp.alt_dest.sl            = attr->alt_ah_attr.sl;
        resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
        resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
        resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
        resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

        resp.max_send_wr     = init_attr->cap.max_send_wr;
        resp.max_recv_wr     = init_attr->cap.max_recv_wr;
        resp.max_send_sge    = init_attr->cap.max_send_sge;
        resp.max_recv_sge    = init_attr->cap.max_recv_sge;
        resp.max_inline_data = init_attr->cap.max_inline_data;
        resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        kfree(attr);
        kfree(init_attr);

        return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
        switch (qp_type) {
        case IB_QPT_XRC_INI:
                return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
        case IB_QPT_XRC_TGT:
                return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
                                IB_QP_RNR_RETRY);
        default:
                return mask;
        }
}
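
/*
 * Example of the mask filtering above: userspace commonly passes one
 * attr_mask regardless of QP type, so for an XRC initiator the
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER bits are simply
 * dropped here instead of being rejected by the driver.
 */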
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_modify_qp cmd;
        struct ib_udata            udata;
        struct ib_qp              *qp;
        struct ib_qp_attr         *attr;
        int                        ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
                   out_len);

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                ret = -EINVAL;
                goto out;
        }

        attr->qp_state            = cmd.qp_state;
        attr->cur_qp_state        = cmd.cur_qp_state;
        attr->path_mtu            = cmd.path_mtu;
        attr->path_mig_state      = cmd.path_mig_state;
        attr->qkey                = cmd.qkey;
        attr->rq_psn              = cmd.rq_psn;
        attr->sq_psn              = cmd.sq_psn;
        attr->dest_qp_num         = cmd.dest_qp_num;
        attr->qp_access_flags     = cmd.qp_access_flags;
        attr->pkey_index          = cmd.pkey_index;
        attr->alt_pkey_index      = cmd.alt_pkey_index;
        attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
        attr->max_rd_atomic       = cmd.max_rd_atomic;
        attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
        attr->min_rnr_timer       = cmd.min_rnr_timer;
        attr->port_num            = cmd.port_num;
        attr->timeout             = cmd.timeout;
        attr->retry_cnt           = cmd.retry_cnt;
        attr->rnr_retry           = cmd.rnr_retry;
        attr->alt_port_num        = cmd.alt_port_num;
        attr->alt_timeout         = cmd.alt_timeout;

        memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
        attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
        attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
        attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
        attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
        attr->ah_attr.dlid              = cmd.dest.dlid;
        attr->ah_attr.sl                = cmd.dest.sl;
        attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
        attr->ah_attr.static_rate       = cmd.dest.static_rate;
        attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
        attr->ah_attr.port_num          = cmd.dest.port_num;

        memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
        attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
        attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
        attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
        attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
        attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
        attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
        attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
        attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
        attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
        attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

        if (qp->real_qp == qp) {
                ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
                if (ret)
                        goto release_qp;
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }

        if (ret)
                goto release_qp;

        ret = in_len;

release_qp:
        put_qp_read(qp);

out:
        kfree(attr);

        return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_qp      cmd;
        struct ib_uverbs_destroy_qp_resp resp;
        struct ib_uobject               *uobj;
        struct ib_qp                    *qp;
        struct ib_uqp_object            *obj;
        int                              ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        qp  = uobj->object;
        obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

        if (!list_empty(&obj->mcast_list)) {
                put_uobj_write(uobj);
                return -EBUSY;
        }

        ret = ib_destroy_qp(qp);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        if (obj->uxrcd)
                atomic_dec(&obj->uxrcd->refcnt);

        idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, &obj->uevent);

        resp.events_reported = obj->uevent.events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_send      cmd;
        struct ib_uverbs_post_send_resp resp;
        struct ib_uverbs_send_wr       *user_wr;
        struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
        struct ib_qp                   *qp;
        int                             i, sg_ind;
        int                             is_ud;
        ssize_t                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
            cmd.sge_count * sizeof (struct ib_uverbs_sge))
                return -EINVAL;

        if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
                return -EINVAL;

        user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
        if (!user_wr)
                return -ENOMEM;

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        is_ud = qp->qp_type == IB_QPT_UD;
        sg_ind = 0;
        last = NULL;
        for (i = 0; i < cmd.wr_count; ++i) {
                if (copy_from_user(user_wr,
                                   buf + sizeof cmd + i * cmd.wqe_size,
                                   cmd.wqe_size)) {
                        ret = -EFAULT;
                        goto out_put;
                }

                if (user_wr->num_sge + sg_ind > cmd.sge_count) {
                        ret = -EINVAL;
                        goto out_put;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto out_put;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next       = NULL;
                next->wr_id      = user_wr->wr_id;
                next->num_sge    = user_wr->num_sge;
                next->opcode     = user_wr->opcode;
                next->send_flags = user_wr->send_flags;

                if (is_ud) {
                        if (next->opcode != IB_WR_SEND &&
                            next->opcode != IB_WR_SEND_WITH_IMM) {
                                ret = -EINVAL;
                                goto out_put;
                        }

                        next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
                                                     file->ucontext);
                        if (!next->wr.ud.ah) {
                                ret = -EINVAL;
                                goto out_put;
                        }
                        next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
                        next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
                        if (next->opcode == IB_WR_SEND_WITH_IMM)
                                next->ex.imm_data =
                                        (__be32 __force) user_wr->ex.imm_data;
                } else {
                        switch (next->opcode) {
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                next->ex.imm_data =
                                        (__be32 __force) user_wr->ex.imm_data;
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_READ:
                                next->wr.rdma.remote_addr =
                                        user_wr->wr.rdma.remote_addr;
                                next->wr.rdma.rkey        =
                                        user_wr->wr.rdma.rkey;
                                break;
                        case IB_WR_SEND_WITH_IMM:
                                next->ex.imm_data =
                                        (__be32 __force) user_wr->ex.imm_data;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                next->ex.invalidate_rkey =
                                        user_wr->ex.invalidate_rkey;
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                next->wr.atomic.remote_addr =
                                        user_wr->wr.atomic.remote_addr;
                                next->wr.atomic.compare_add =
                                        user_wr->wr.atomic.compare_add;
                                next->wr.atomic.swap = user_wr->wr.atomic.swap;
                                next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
                                break;
                        default:
                                break;
                        }
                }

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + sizeof cmd +
                                           cmd.wr_count * cmd.wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto out_put;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        resp.bad_wr = 0;
        ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out_put:
        put_qp_read(qp);

        while (wr) {
                if (is_ud && wr->wr.ud.ah)
                        put_ah_read(wr->wr.ud.ah);
                next = wr->next;
                kfree(wr);
                wr = next;
        }

out:
        kfree(user_wr);

        return ret ? ret : in_len;
}
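
/*
 * Note the allocation trick used when unmarshalling work requests,
 * here and in ib_uverbs_unmarshall_recv() below: each WR is allocated
 * together with its scatter list in a single kmalloc(), and sg_list
 * then points just past the (ib_sge-aligned) WR header.
 */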
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
                                                    int in_len,
                                                    u32 wr_count,
                                                    u32 sge_count,
                                                    u32 wqe_size)
{
        struct ib_uverbs_recv_wr *user_wr;
        struct ib_recv_wr        *wr = NULL, *last, *next;
        int                       sg_ind;
        int                       i;
        int                       ret;

        if (in_len < wqe_size * wr_count +
            sge_count * sizeof (struct ib_uverbs_sge))
                return ERR_PTR(-EINVAL);

        if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
                return ERR_PTR(-EINVAL);

        user_wr = kmalloc(wqe_size, GFP_KERNEL);
        if (!user_wr)
                return ERR_PTR(-ENOMEM);

        sg_ind = 0;
        last = NULL;
        for (i = 0; i < wr_count; ++i) {
                if (copy_from_user(user_wr, buf + i * wqe_size,
                                   wqe_size)) {
                        ret = -EFAULT;
                        goto err;
                }

                if (user_wr->num_sge + sg_ind > sge_count) {
                        ret = -EINVAL;
                        goto err;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next    = NULL;
                next->wr_id   = user_wr->wr_id;
                next->num_sge = user_wr->num_sge;

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + wr_count * wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto err;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        kfree(user_wr);
        return wr;

err:
        kfree(user_wr);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ERR_PTR(ret);
}
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_recv      cmd;
        struct ib_uverbs_post_recv_resp resp;
        struct ib_recv_wr              *wr, *next, *bad_wr;
        struct ib_qp                   *qp;
        ssize_t                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        resp.bad_wr = 0;
        ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

        put_qp_read(qp);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_post_srq_recv      cmd;
        struct ib_uverbs_post_srq_recv_resp resp;
        struct ib_recv_wr                  *wr, *next, *bad_wr;
        struct ib_srq                      *srq;
        ssize_t                             ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                goto out;

        resp.bad_wr = 0;
        ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

        put_srq_read(srq);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_ah      cmd;
        struct ib_uverbs_create_ah_resp resp;
        struct ib_uobject              *uobj;
        struct ib_pd                   *pd;
        struct ib_ah                   *ah;
        struct ib_ah_attr               attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err;
        }

        attr.dlid              = cmd.attr.dlid;
        attr.sl                = cmd.attr.sl;
        attr.src_path_bits     = cmd.attr.src_path_bits;
        attr.static_rate       = cmd.attr.static_rate;
        attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
        attr.port_num          = cmd.attr.port_num;
        attr.grh.flow_label    = cmd.attr.grh.flow_label;
        attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
        attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
        attr.grh.traffic_class = cmd.attr.grh.traffic_class;
        memset(&attr.dmac, 0, sizeof(attr.dmac));
        memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

        ah = ib_create_ah(pd, &attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_put;
        }

        ah->uobject  = uobj;
        uobj->object = ah;

        ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
        if (ret)
                goto err_destroy;

        resp.ah_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->ah_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
        ib_destroy_ah(ah);

err_put:
        put_pd_read(pd);

err:
        put_uobj_write(uobj);
        return ret;
}
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_destroy_ah cmd;
        struct ib_ah               *ah;
        struct ib_uobject          *uobj;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        ah = uobj->object;

        ret = ib_destroy_ah(ah);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp                 *qp;
        struct ib_uqp_object         *obj;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        ret = 0;
                        goto out_put;
                }

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast) {
                ret = -ENOMEM;
                goto out_put;
        }

        mcast->lid = cmd.mlid;
        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
        if (!ret)
                list_add_tail(&mcast->list, &obj->mcast_list);
        else
                kfree(mcast);

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_uqp_object         *obj;
        struct ib_qp                 *qp;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
        if (ret)
                goto out_put;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        list_del(&mcast->list);
                        kfree(mcast);
                        break;
                }

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
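
/*
 * ABI note for the converter above: every user-space spec starts with a
 * { type, size, reserved } header, and the size userspace reports must
 * match the kernel's sizeof() for that spec type exactly, or the flow
 * is rejected.  Illustrative user-side layout for an L2 spec (the
 * field values are placeholders):
 *
 *	struct ib_uverbs_flow_spec_eth spec = {
 *		.type = IB_FLOW_SPEC_ETH,
 *		.size = sizeof(spec),
 *		.val.ether_type  = htons(0x8915),
 *		.mask.ether_type = 0xffff,
 *	};
 */
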
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Walk the variable-size specs, converting each to its kernel form. */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}
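
/*
 * For orientation only: together with ib_uverbs_ex_create_flow() above,
 * this implements the flow-steering pair that libibverbs exposes as
 * ibv_create_flow() and ibv_destroy_flow().  A minimal sketch, assuming
 * a libibverbs with the flow-steering extension; the spec values are
 * placeholders:
 *
 *	struct raw_eth_flow_attr {
 *		struct ibv_flow_attr     attr;
 *		struct ibv_flow_spec_eth eth;
 *	} __attribute__((packed)) fr = {
 *		.attr = {
 *			.type         = IBV_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(fr),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.eth = {
 *			.type = IBV_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ibv_flow_spec_eth),
 *		},
 *	};
 *	struct ibv_flow *flow = ibv_create_flow(qp, &fr.attr);
 *
 *	if (flow)
 *		ibv_destroy_flow(flow);
 */
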
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	/* XRC SRQs need an XRC domain and a CQ in addition to the PD. */
	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
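
/*
 * Reference-count summary for the path above: a live SRQ pins its PD
 * (pd->usecnt) and, for IB_SRQT_XRC, also its CQ and XRC domain; the
 * matching decrements happen when the SRQ is destroyed.  For
 * orientation, the user-space entry point that reaches this for XRC is
 * ibv_create_srq_ex() -- a minimal sketch, assuming libibverbs, with
 * ctx, pd, xrcd and cq created earlier:
 *
 *	struct ibv_srq_init_attr_ex init = {
 *		.attr      = { .max_wr = 128, .max_sge = 1 },
 *		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_PD |
 *			     IBV_SRQ_INIT_ATTR_XRCD | IBV_SRQ_INIT_ATTR_CQ,
 *		.srq_type  = IBV_SRQT_XRC,
 *		.pd        = pd,
 *		.xrcd      = xrcd,
 *		.cq        = cq,
 *	};
 *	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &init);
 */
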
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Translate the legacy command into its extended equivalent. */
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
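
/*
 * Buffer split performed by INIT_UDATA() above, for reference: the
 * fixed ABI structs sit at the front of each buffer, and everything the
 * provider library appended after them is handed to the driver as
 * udata:
 *
 *	buf:           [ ib_uverbs_create_srq      | driver in-data  ]
 *	cmd.response:  [ ib_uverbs_create_srq_resp | driver out-data ]
 */
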
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
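
/*
 * For orientation only: this backs libibverbs' ibv_modify_srq().  The
 * usual use is arming the SRQ limit: once the number of posted receives
 * drops below srq_limit, the driver raises IB_EVENT_SRQ_LIMIT_REACHED,
 * which ib_uverbs_srq_event_handler() (installed at create time above)
 * forwards to userspace.  Minimal sketch, assuming libibverbs:
 *
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *
 *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
 */
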
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* An XRC SRQ also held a reference on its XRC domain. */
	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
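
/*
 * Note on resp.events_reported above: it tells userspace how many
 * asynchronous events were delivered for this SRQ, so the library can
 * drain exactly that many events from the async file before freeing
 * its per-SRQ state.  The QP and CQ teardown paths use the same
 * handshake.
 */
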
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp;
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr;
	struct ib_device *device;
	int err;

	device = file->device->ib_dev;
	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = device->query_device(device, &attr);
	if (err)
		return err;

	copy_query_dev_fields(file, &resp.base, &attr);
	resp.comp_mask = 0;

	/* Report odp_caps only if the user buffer can hold it. */
	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
	resp.odp_caps.reserved = 0;
#else
	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
	resp.response_length += sizeof(resp.odp_caps);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);