/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};
static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };
/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, an rcu_read_lock()
 *   suffices instead, but similarly the kref reference is grabbed
 *   before the rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
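/*
 * A minimal usage sketch of this scheme, assuming a hypothetical
 * command handler; it mirrors how the handlers below pair the
 * idr_read_*()/put_*_read() helpers so the object cannot be destroyed
 * while it is in use:
 *
 *	struct ib_pd *pd = idr_read_pd(cmd.pd_handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;	// not found, wrong context, or !live
 *	... use pd; its uobject rwsem is held for reading here ...
 *	put_pd_read(pd);	// up_read() + kref_put()
 */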
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}
static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}
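/*
 * Note on the allocation pattern above: idr_preload(GFP_KERNEL) fills
 * a per-cpu cache while sleeping is still allowed, so the subsequent
 * idr_alloc() under the ib_uverbs_idr_lock spinlock can safely use
 * GFP_NOWAIT and still find preallocated memory.
 */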
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
}

static void put_wq_read(struct ib_wq *wq)
{
	put_uobj_read(wq->uobject);
}

static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
							       struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
}

static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
{
	put_uobj_read(ind_table->uobject);
}
static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
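/*
 * A brief sketch of how the helper pair above is meant to be used
 * (hypothetical handler body; the pattern recurs throughout this file):
 *
 *	struct ib_uobject *xrcd_uobj;
 *	struct ib_xrcd *xrcd = idr_read_xrcd(handle, file->ucontext, &xrcd_uobj);
 *	if (!xrcd)
 *		return -EINVAL;
 *	... operate on xrcd ...
 *	put_xrcd_read(xrcd_uobj);	// always drop via the uobject
 *
 * idr_read_xrcd() returns the uobject separately because, unlike PDs
 * or CQs, an ib_xrcd carries no back-pointer to its uobject.
 */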
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->wq_list);
	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);

	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);

	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;
#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
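/*
 * Layout note (applies to all of the write() handlers below): the
 * user's write buffer starts with a fixed command struct, and any
 * driver-specific input follows it.  The command struct itself carries
 * a user-space pointer (cmd.response) to the output buffer, whose
 * fixed response struct is likewise followed by driver-specific
 * output.  INIT_UDATA() captures exactly those two trailing regions:
 *
 *	INIT_UDATA(&udata, buf + sizeof cmd,                  // extra input
 *		   (unsigned long) cmd.response + sizeof resp, // extra output
 *		   in_len - sizeof cmd, out_len - sizeof resp);
 */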
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver		= attr->fw_ver;
	resp->node_guid		= ib_dev->node_guid;
	resp->sys_image_guid	= attr->sys_image_guid;
	resp->max_mr_size	= attr->max_mr_size;
	resp->page_size_cap	= attr->page_size_cap;
	resp->vendor_id		= attr->vendor_id;
	resp->vendor_part_id	= attr->vendor_part_id;
	resp->hw_ver		= attr->hw_ver;
	resp->max_qp		= attr->max_qp;
	resp->max_qp_wr		= attr->max_qp_wr;
	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
	resp->max_sge		= attr->max_sge;
	resp->max_sge_rd	= attr->max_sge_rd;
	resp->max_cq		= attr->max_cq;
	resp->max_cqe		= attr->max_cqe;
	resp->max_mr		= attr->max_mr;
	resp->max_pd		= attr->max_pd;
	resp->max_qp_rd_atom	= attr->max_qp_rd_atom;
	resp->max_ee_rd_atom	= attr->max_ee_rd_atom;
	resp->max_res_rd_atom	= attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom	= attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom	= attr->max_ee_init_rd_atom;
	resp->atomic_cap		= attr->atomic_cap;
	resp->max_ee			= attr->max_ee;
	resp->max_rdd			= attr->max_rdd;
	resp->max_mw			= attr->max_mw;
	resp->max_raw_ipv6_qp		= attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp		= attr->max_raw_ethy_qp;
	resp->max_mcast_grp		= attr->max_mcast_grp;
	resp->max_mcast_qp_attach	= attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach	= attr->max_total_mcast_qp_attach;
	resp->max_ah			= attr->max_ah;
	resp->max_fmr			= attr->max_fmr;
	resp->max_map_per_fmr		= attr->max_map_per_fmr;
	resp->max_srq			= attr->max_srq;
	resp->max_srq_wr		= attr->max_srq_wr;
	resp->max_srq_sge		= attr->max_srq_sge;
	resp->max_pkeys			= attr->max_pkeys;
	resp->local_ca_ack_delay	= attr->local_ca_ack_delay;
	resp->phys_port_cnt		= ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
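/*
 * The creation pattern above (and in the other alloc/create handlers)
 * is two-phase: the uobject is initialized and its rwsem taken for
 * writing before the object becomes visible in the idr, and only after
 * the handle has been copied to userspace is uobj->live set and the
 * rwsem released.  Until that point any concurrent lookup of the new
 * handle fails the !live check in idr_read_uobj()/idr_write_uobj().
 */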
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	struct ib_pd		   *pd;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}
static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}
static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
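/*
 * Rationale for the rb-tree above: an XRC domain is identified by a
 * file's inode so that unrelated processes opening the same file share
 * one ib_xrcd.  The tree maps inode -> xrcd per uverbs device, and the
 * igrab()/iput() pair keeps the inode pinned for as long as an entry
 * references it.
 */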
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject           *uobj;
	struct ib_xrcd              *xrcd = NULL;
	struct inode                *inode = NULL;
	struct ib_uxrcd_object      *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_pd                *pd = NULL;
	struct ib_mr                *mr;
	struct ib_pd		    *old_pd;
	int                          ret;
	struct ib_uobject	    *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
			return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject	 *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata		       udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject	   *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}
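/*
 * The cmd_sz/offsetof() test above is the extensibility idiom used by
 * the extended uverbs commands: a field is honoured only when the
 * caller's command structure is large enough to contain it, e.g.
 *
 *	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
 *		attr.flags = cmd->flags;
 *
 * so old userspace that predates the field keeps its original
 * semantics, and resp.response_length reports how much of the extended
 * response the kernel actually filled in.
 */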
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq	cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq  cmd;
	struct ib_ucq_object           *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq               	*cq;
	struct ib_ucq_object        	*obj;
	struct ib_uverbs_event_file	*ev_file;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object		*obj;
	struct ib_device		*device;
	struct ib_pd			*pd = NULL;
	struct ib_xrcd			*xrcd = NULL;
	struct ib_uobject		*uninitialized_var(xrcd_uobj);
	struct ib_cq			*scq = NULL, *rcq = NULL;
	struct ib_srq			*srq = NULL;
	struct ib_qp			*qp;
	char				*buf;
	struct ib_qp_init_attr		attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int				ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		      (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
							 file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr  = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = idr_read_cq(cmd->recv_cq_handle,
							  file->ucontext, 0);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				IB_QP_CREATE_CROSS_CHANNEL |
				IB_QP_CREATE_MANAGED_SEND |
				IB_QP_CREATE_MANAGED_RECV |
				IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->rwq_ind_tbl	  = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
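/*
 * Note: for IB_QPT_XRC_TGT the QP is created through ib_create_qp()
 * and shares the XRC domain's resources, so none of the per-QP
 * reference counts above are taken; every other QP type goes through
 * the driver's create_qp() and pins its PD, CQs, SRQ and indirection
 * table for its lifetime.
 */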
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_ex_create_qp	cmd_ex;
	struct ib_udata			ucore;
	struct ib_udata			uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int				err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state 		  = cmd.qp_state;
	attr->cur_qp_state 	  = cmd.cur_qp_state;
	attr->path_mtu 		  = cmd.path_mtu;
	attr->path_mig_state 	  = cmd.path_mig_state;
	attr->qkey 		  = cmd.qkey;
	attr->rq_psn 		  = cmd.rq_psn;
	attr->sq_psn 		  = cmd.sq_psn;
	attr->dest_qp_num 	  = cmd.dest_qp_num;
	attr->qp_access_flags 	  = cmd.qp_access_flags;
	attr->pkey_index 	  = cmd.pkey_index;
	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
	attr->port_num 		  = cmd.port_num;
	attr->timeout 		  = cmd.timeout;
	attr->retry_cnt 	  = cmd.retry_cnt;
	attr->rnr_retry 	  = cmd.rnr_retry;
	attr->alt_port_num 	  = cmd.alt_port_num;
	attr->alt_timeout 	  = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
	attr->ah_attr.sl   	            = cmd.dest.sl;
	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
	attr->ah_attr.ah_flags 	            = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num 	            = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl   	            = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp               	*qp;
	struct ib_uqp_object        	*obj;
	int                        	 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
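/*
 * Layout produced by alloc_wr(): a single allocation holding the work
 * request followed, at an ib_sge-aligned offset, by its scatter/gather
 * array.  The senders below then point sg_list just past the WR:
 *
 *	next->sg_list = (void *) next + ALIGN(next_size, sizeof(struct ib_sge));
 *
 * which keeps each WR and its SGEs in one kmalloc and one kfree.
 */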
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_send      cmd;
        struct ib_uverbs_post_send_resp resp;
        struct ib_uverbs_send_wr       *user_wr;
        struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
        struct ib_qp                   *qp;
        int                             i, sg_ind;
        int                             is_ud;
        ssize_t                         ret = -EINVAL;
        size_t                          next_size;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
            cmd.sge_count * sizeof (struct ib_uverbs_sge))
                return -EINVAL;

        if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
                return -EINVAL;

        user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
        if (!user_wr)
                return -ENOMEM;

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        is_ud = qp->qp_type == IB_QPT_UD;
        sg_ind = 0;
        last = NULL;
        for (i = 0; i < cmd.wr_count; ++i) {
                if (copy_from_user(user_wr,
                                   buf + sizeof cmd + i * cmd.wqe_size,
                                   cmd.wqe_size)) {
                        ret = -EFAULT;
                        goto out_put;
                }

                if (user_wr->num_sge + sg_ind > cmd.sge_count) {
                        ret = -EINVAL;
                        goto out_put;
                }

                if (is_ud) {
                        struct ib_ud_wr *ud;

                        if (user_wr->opcode != IB_WR_SEND &&
                            user_wr->opcode != IB_WR_SEND_WITH_IMM) {
                                ret = -EINVAL;
                                goto out_put;
                        }

                        next_size = sizeof(*ud);
                        ud = alloc_wr(next_size, user_wr->num_sge);
                        if (!ud) {
                                ret = -ENOMEM;
                                goto out_put;
                        }

                        ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
                        if (!ud->ah) {
                                kfree(ud);
                                ret = -EINVAL;
                                goto out_put;
                        }
                        ud->remote_qpn  = user_wr->wr.ud.remote_qpn;
                        ud->remote_qkey = user_wr->wr.ud.remote_qkey;

                        next = &ud->wr;
                } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                           user_wr->opcode == IB_WR_RDMA_WRITE ||
                           user_wr->opcode == IB_WR_RDMA_READ) {
                        struct ib_rdma_wr *rdma;

                        next_size = sizeof(*rdma);
                        rdma = alloc_wr(next_size, user_wr->num_sge);
                        if (!rdma) {
                                ret = -ENOMEM;
                                goto out_put;
                        }

                        rdma->remote_addr = user_wr->wr.rdma.remote_addr;
                        rdma->rkey        = user_wr->wr.rdma.rkey;

                        next = &rdma->wr;
                } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                           user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                        struct ib_atomic_wr *atomic;

                        next_size = sizeof(*atomic);
                        atomic = alloc_wr(next_size, user_wr->num_sge);
                        if (!atomic) {
                                ret = -ENOMEM;
                                goto out_put;
                        }

                        atomic->remote_addr = user_wr->wr.atomic.remote_addr;
                        atomic->compare_add = user_wr->wr.atomic.compare_add;
                        atomic->swap        = user_wr->wr.atomic.swap;
                        atomic->rkey        = user_wr->wr.atomic.rkey;

                        next = &atomic->wr;
                } else if (user_wr->opcode == IB_WR_SEND ||
                           user_wr->opcode == IB_WR_SEND_WITH_IMM ||
                           user_wr->opcode == IB_WR_SEND_WITH_INV) {
                        next_size = sizeof(*next);
                        next = alloc_wr(next_size, user_wr->num_sge);
                        if (!next) {
                                ret = -ENOMEM;
                                goto out_put;
                        }
                } else {
                        ret = -EINVAL;
                        goto out_put;
                }

                if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
                    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                        next->ex.imm_data =
                                (__be32 __force) user_wr->ex.imm_data;
                } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
                        next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next       = NULL;
                next->wr_id      = user_wr->wr_id;
                next->num_sge    = user_wr->num_sge;
                next->opcode     = user_wr->opcode;
                next->send_flags = user_wr->send_flags;

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(next_size, sizeof(struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + sizeof cmd +
                                           cmd.wr_count * cmd.wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto out_put;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        resp.bad_wr = 0;
        ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out_put:
        put_qp_read(qp);

        while (wr) {
                if (is_ud && ud_wr(wr)->ah)
                        put_ah_read(ud_wr(wr)->ah);
                next = wr->next;
                kfree(wr);
                wr = next;
        }

out:
        kfree(user_wr);

        return ret ? ret : in_len;
}
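
/*
 * Shared unmarshalling for the receive paths: ib_uverbs_post_recv() and
 * ib_uverbs_post_srq_recv() both hand their payload (everything after
 * the fixed command header) to this helper, which rebuilds the user's
 * work request chain in kernel memory from the same
 * [wqe 0]...[wqe n-1][sge 0]...[sge m-1] layout as the send path.
 */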
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
                                                    int in_len,
                                                    u32 wr_count,
                                                    u32 sge_count,
                                                    u32 wqe_size)
{
        struct ib_uverbs_recv_wr *user_wr;
        struct ib_recv_wr        *wr = NULL, *last, *next;
        int                       sg_ind;
        int                       i;
        int                       ret;

        if (in_len < wqe_size * wr_count +
            sge_count * sizeof (struct ib_uverbs_sge))
                return ERR_PTR(-EINVAL);

        if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
                return ERR_PTR(-EINVAL);

        user_wr = kmalloc(wqe_size, GFP_KERNEL);
        if (!user_wr)
                return ERR_PTR(-ENOMEM);

        sg_ind = 0;
        last = NULL;
        for (i = 0; i < wr_count; ++i) {
                if (copy_from_user(user_wr, buf + i * wqe_size,
                                   wqe_size)) {
                        ret = -EFAULT;
                        goto err;
                }

                if (user_wr->num_sge + sg_ind > sge_count) {
                        ret = -EINVAL;
                        goto err;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next       = NULL;
                next->wr_id      = user_wr->wr_id;
                next->num_sge    = user_wr->num_sge;

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + wr_count * wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto err;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        kfree(user_wr);
        return wr;

err:
        kfree(user_wr);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_recv      cmd;
        struct ib_uverbs_post_recv_resp resp;
        struct ib_recv_wr              *wr, *next, *bad_wr;
        struct ib_qp                   *qp;
        ssize_t                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        resp.bad_wr = 0;
        ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

        put_qp_read(qp);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                                struct ib_device *ib_dev,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_post_srq_recv      cmd;
        struct ib_uverbs_post_srq_recv_resp resp;
        struct ib_recv_wr                  *wr, *next, *bad_wr;
        struct ib_srq                      *srq;
        ssize_t                             ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                goto out;

        resp.bad_wr = 0;
        ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

        put_srq_read(srq);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_ah      cmd;
        struct ib_uverbs_create_ah_resp resp;
        struct ib_uobject              *uobj;
        struct ib_pd                   *pd;
        struct ib_ah                   *ah;
        struct ib_ah_attr               attr;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err;
        }

        attr.dlid              = cmd.attr.dlid;
        attr.sl                = cmd.attr.sl;
        attr.src_path_bits     = cmd.attr.src_path_bits;
        attr.static_rate       = cmd.attr.static_rate;
        attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
        attr.port_num          = cmd.attr.port_num;
        attr.grh.flow_label    = cmd.attr.grh.flow_label;
        attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
        attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
        attr.grh.traffic_class = cmd.attr.grh.traffic_class;
        memset(&attr.dmac, 0, sizeof(attr.dmac));
        memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

        ah = ib_create_ah(pd, &attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_put;
        }

        ah->uobject  = uobj;
        uobj->object = ah;

        ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
        if (ret)
                goto err_destroy;

        resp.ah_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->ah_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
        ib_destroy_ah(ah);

err_put:
        put_pd_read(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_destroy_ah cmd;
        struct ib_ah               *ah;
        struct ib_uobject          *uobj;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        ah = uobj->object;

        ret = ib_destroy_ah(ah);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}
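
/*
 * The uverbs layer mirrors every successful multicast attach in the
 * ib_uqp_object's mcast_list so that detach and QP destruction can find
 * and release the kernel-side entries; ib_uverbs_destroy_qp() above
 * refuses to destroy a QP whose mcast_list is still non-empty.
 */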

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp                 *qp;
        struct ib_uqp_object         *obj;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        ret = 0;
                        goto out_put;
                }

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast) {
                ret = -ENOMEM;
                goto out_put;
        }

        mcast->lid = cmd.mlid;
        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
        if (!ret)
                list_add_tail(&mcast->list, &obj->mcast_list);
        else
                kfree(mcast);

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               struct ib_device *ib_dev,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_uqp_object         *obj;
        struct ib_qp                 *qp;
        struct ib_uverbs_mcast_entry *mcast;
        int                           ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
        if (ret)
                goto out_put;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        list_del(&mcast->list);
                        kfree(mcast);
                        break;
                }

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}
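
/*
 * Flow-steering specs arrive from userspace as a header followed by two
 * equally sized blobs, value then mask; hence the "/ 2" below when
 * deriving the per-blob filter size from the header's total size.
 */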
static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
        /* Returns user space filter size, includes padding */
        return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
                                u16 ib_real_filter_sz)
{
        /*
         * User space filter structures must be 64 bit aligned, otherwise this
         * may pass, but we won't handle additional new attributes.
         */
        if (kern_filter_size > ib_real_filter_sz) {
                if (memchr_inv(kern_spec_filter +
                               ib_real_filter_sz, 0,
                               kern_filter_size - ib_real_filter_sz))
                        return -EINVAL;
                return ib_real_filter_sz;
        }
        return kern_filter_size;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
{
        ssize_t actual_filter_sz;
        ssize_t kern_filter_sz;
        ssize_t ib_filter_sz;
        void *kern_spec_mask;
        void *kern_spec_val;

        if (kern_spec->reserved)
                return -EINVAL;

        ib_spec->type = kern_spec->type;

        kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
        /* User flow spec size must be aligned to 4 bytes */
        if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
                return -EINVAL;

        kern_spec_val = (void *)kern_spec +
                sizeof(struct ib_uverbs_flow_spec_hdr);
        kern_spec_mask = kern_spec_val + kern_filter_sz;

        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ETH:
                ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_eth);
                memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_IPV4:
                ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
                memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
                break;
        case IB_FLOW_SPEC_IPV6:
                ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
                memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

                if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
                    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
                        return -EINVAL;
                break;
        case IB_FLOW_SPEC_TCP:
        case IB_FLOW_SPEC_UDP:
                ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
                actual_filter_sz = spec_filter_size(kern_spec_mask,
                                                    kern_filter_sz,
                                                    ib_filter_sz);
                if (actual_filter_sz <= 0)
                        return -EINVAL;
                ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
                memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
                memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
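
/*
 * The extended (ex_) commands below follow the extensible-ABI pattern:
 * required_cmd_sz/required_resp_len describe the oldest acceptable
 * layout via offsetof() on the last mandatory field, any input bytes
 * beyond the kernel's view of the struct must be zero
 * (ib_is_udata_cleared()), and resp.response_length reports how much of
 * the response this kernel actually filled in.
 */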
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_wq      cmd = {};
        struct ib_uverbs_ex_create_wq_resp resp = {};
        struct ib_uwq_object              *obj;
        int err = 0;
        struct ib_cq *cq;
        struct ib_pd *pd;
        struct ib_wq *wq;
        struct ib_wq_init_attr wq_init_attr = {};
        size_t required_cmd_sz;
        size_t required_resp_len;

        required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
        required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        obj = kmalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext,
                  &wq_lock_class);
        down_write(&obj->uevent.uobject.mutex);
        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                err = -EINVAL;
                goto err_uobj;
        }

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq) {
                err = -EINVAL;
                goto err_put_pd;
        }

        wq_init_attr.cq = cq;
        wq_init_attr.max_sge = cmd.max_sge;
        wq_init_attr.max_wr = cmd.max_wr;
        wq_init_attr.wq_context = file;
        wq_init_attr.wq_type = cmd.wq_type;
        wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
        if (IS_ERR(wq)) {
                err = PTR_ERR(wq);
                goto err_put_cq;
        }

        wq->uobject = &obj->uevent.uobject;
        obj->uevent.uobject.object = wq;
        wq->wq_type = wq_init_attr.wq_type;
        wq->cq = cq;
        wq->pd = pd;
        wq->device = pd->device;
        wq->wq_context = wq_init_attr.wq_context;
        atomic_set(&wq->usecnt, 0);
        atomic_inc(&pd->usecnt);
        atomic_inc(&cq->usecnt);
        err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
        if (err)
                goto destroy_wq;

        memset(&resp, 0, sizeof(resp));
        resp.wq_handle = obj->uevent.uobject.id;
        resp.max_sge = wq_init_attr.max_sge;
        resp.max_wr = wq_init_attr.max_wr;
        resp.wqn = wq->wq_num;
        resp.response_length = required_resp_len;
        err = ib_copy_to_udata(ucore,
                               &resp, resp.response_length);
        if (err)
                goto err_copy;

        put_pd_read(pd);
        put_cq_read(cq);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;
        up_write(&obj->uevent.uobject.mutex);
        return 0;

err_copy:
        idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
destroy_wq:
        ib_destroy_wq(wq);
err_put_cq:
        put_cq_read(cq);
err_put_pd:
        put_pd_read(pd);
err_uobj:
        put_uobj_write(&obj->uevent.uobject);

        return err;
}

int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            struct ib_udata *ucore,
                            struct ib_udata *uhw)
{
        struct ib_uverbs_ex_destroy_wq      cmd = {};
        struct ib_uverbs_ex_destroy_wq_resp resp = {};
        struct ib_wq                       *wq;
        struct ib_uobject                  *uobj;
        struct ib_uwq_object               *obj;
        size_t required_cmd_sz;
        size_t required_resp_len;
        int    ret;

        required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
        required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        resp.response_length = required_resp_len;
        uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;

        wq = uobj->object;
        obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
        ret = ib_destroy_wq(wq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);
        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_wq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, &obj->uevent);
        resp.events_reported = obj->uevent.events_reported;
        put_uobj(uobj);

        ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
        if (ret)
                return ret;

        return 0;
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
                           struct ib_device *ib_dev,
                           struct ib_udata *ucore,
                           struct ib_udata *uhw)
{
        struct ib_uverbs_ex_modify_wq cmd = {};
        struct ib_wq *wq;
        struct ib_wq_attr wq_attr = {};
        size_t required_cmd_sz;
        int ret;

        required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (!cmd.attr_mask)
                return -EINVAL;

        if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
                return -EINVAL;

        wq = idr_read_wq(cmd.wq_handle, file->ucontext);
        if (!wq)
                return -EINVAL;

        wq_attr.curr_wq_state = cmd.curr_wq_state;
        wq_attr.wq_state = cmd.wq_state;
        ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
        put_wq_read(wq);
        return ret;
}
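
/*
 * An RWQ indirection table is sized as a power of two: userspace sends
 * log_ind_tbl_size and 1 << log_ind_tbl_size WQ handles.  A one-entry
 * table still pads its handle array to u64 alignment, which is why
 * expected_in_size grows by an extra sizeof(__u32) in that case.
 */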
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
                                      struct ib_device *ib_dev,
                                      struct ib_udata *ucore,
                                      struct ib_udata *uhw)
{
        struct ib_uverbs_ex_create_rwq_ind_table      cmd = {};
        struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
        struct ib_uobject                 *uobj;
        int err = 0;
        struct ib_rwq_ind_table_init_attr init_attr = {};
        struct ib_rwq_ind_table *rwq_ind_tbl;
        struct ib_wq **wqs = NULL;
        u32 *wqs_handles = NULL;
        struct ib_wq *wq = NULL;
        int i, j, num_read_wqs;
        u32 num_wq_handles;
        u32 expected_in_size;
        size_t required_cmd_sz_header;
        size_t required_resp_len;

        required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
        required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

        if (ucore->inlen < required_cmd_sz_header)
                return -EINVAL;

        if (ucore->outlen < required_resp_len)
                return -ENOSPC;

        err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
        if (err)
                return err;

        ucore->inbuf += required_cmd_sz_header;
        ucore->inlen -= required_cmd_sz_header;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
                return -EINVAL;

        num_wq_handles = 1 << cmd.log_ind_tbl_size;
        expected_in_size = num_wq_handles * sizeof(__u32);
        if (num_wq_handles == 1)
                /* input size for wq handles is u64 aligned */
                expected_in_size += sizeof(__u32);

        if (ucore->inlen < expected_in_size)
                return -EINVAL;

        if (ucore->inlen > expected_in_size &&
            !ib_is_udata_cleared(ucore, expected_in_size,
                                 ucore->inlen - expected_in_size))
                return -EOPNOTSUPP;

        wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
                              GFP_KERNEL);
        if (!wqs_handles)
                return -ENOMEM;

        err = ib_copy_from_udata(wqs_handles, ucore,
                                 num_wq_handles * sizeof(__u32));
        if (err)
                goto err_free;

        wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
        if (!wqs) {
                err = -ENOMEM;
                goto err_free;
        }

        for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
             num_read_wqs++) {
                wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
                if (!wq) {
                        err = -EINVAL;
                        goto put_wqs;
                }

                wqs[num_read_wqs] = wq;
        }

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj) {
                err = -ENOMEM;
                goto put_wqs;
        }

        init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
        down_write(&uobj->mutex);
        init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
        init_attr.ind_tbl = wqs;
        rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

        if (IS_ERR(rwq_ind_tbl)) {
                err = PTR_ERR(rwq_ind_tbl);
                goto err_uobj;
        }

        rwq_ind_tbl->ind_tbl = wqs;
        rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
        rwq_ind_tbl->uobject = uobj;
        uobj->object = rwq_ind_tbl;
        rwq_ind_tbl->device = ib_dev;
        atomic_set(&rwq_ind_tbl->usecnt, 0);

        for (i = 0; i < num_wq_handles; i++)
                atomic_inc(&wqs[i]->usecnt);

        err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
        if (err)
                goto destroy_ind_tbl;

        resp.ind_tbl_handle = uobj->id;
        resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
        resp.response_length = required_resp_len;

        err = ib_copy_to_udata(ucore,
                               &resp, resp.response_length);
        if (err)
                goto err_copy;

        kfree(wqs_handles);

        for (j = 0; j < num_read_wqs; j++)
                put_wq_read(wqs[j]);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);
        return 0;

err_copy:
        idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
destroy_ind_tbl:
        ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
        put_uobj_write(uobj);
put_wqs:
        for (j = 0; j < num_read_wqs; j++)
                put_wq_read(wqs[j]);
err_free:
        kfree(wqs_handles);
        kfree(wqs);
        return err;
}

int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
                                       struct ib_device *ib_dev,
                                       struct ib_udata *ucore,
                                       struct ib_udata *uhw)
{
        struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
        struct ib_rwq_ind_table *rwq_ind_tbl;
        struct ib_uobject       *uobj;
        int                      ret;
        struct ib_wq           **ind_tbl;
        size_t required_cmd_sz;

        required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

        if (ucore->inlen < required_cmd_sz)
                return -EINVAL;

        if (ucore->inlen > sizeof(cmd) &&
            !ib_is_udata_cleared(ucore, sizeof(cmd),
                                 ucore->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;

        ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EOPNOTSUPP;

        uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;
        rwq_ind_tbl = uobj->object;
        ind_tbl = rwq_ind_tbl->ind_tbl;

        ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        kfree(ind_tbl);
        return ret;
}
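
/*
 * Flow creation copies the variable-length spec list in two steps: the
 * fixed ib_uverbs_flow_attr is read with the command, then
 * cmd.flow_attr.size bytes of specs are copied and converted one by one
 * through kern_spec_to_ib_spec().  The loop below must consume exactly
 * cmd.flow_attr.size bytes across exactly num_of_specs specs, or the
 * request is rejected.
 */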
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             struct ib_udata *ucore,
                             struct ib_udata *uhw)
{
        struct ib_uverbs_create_flow      cmd;
        struct ib_uverbs_create_flow_resp resp;
        struct ib_uobject                *uobj;
        struct ib_flow                   *flow_id;
        struct ib_uverbs_flow_attr       *kern_flow_attr;
        struct ib_flow_attr              *flow_attr;
        struct ib_qp                     *qp;
        int err = 0;
        void *kern_spec;
        void *ib_spec;
        int i;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        if (ucore->outlen < sizeof(resp))
                return -ENOSPC;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        ucore->inbuf += sizeof(cmd);
        ucore->inlen -= sizeof(cmd);

        if (cmd.comp_mask)
                return -EINVAL;

        if (!capable(CAP_NET_RAW))
                return -EPERM;

        if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
                return -EINVAL;

        if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
             (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
                return -EINVAL;

        if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
                return -EINVAL;

        if (cmd.flow_attr.size > ucore->inlen ||
            cmd.flow_attr.size >
            (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
                return -EINVAL;

        if (cmd.flow_attr.reserved[0] ||
            cmd.flow_attr.reserved[1])
                return -EINVAL;

        if (cmd.flow_attr.num_of_specs) {
                kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
                                         GFP_KERNEL);
                if (!kern_flow_attr)
                        return -ENOMEM;

                memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
                err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
                                         cmd.flow_attr.size);
                if (err)
                        goto err_free_attr;
        } else {
                kern_flow_attr = &cmd.flow_attr;
        }

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj) {
                err = -ENOMEM;
                goto err_free_attr;
        }
        init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
        down_write(&uobj->mutex);

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                err = -EINVAL;
                goto err_uobj;
        }

        flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
                            sizeof(union ib_flow_spec), GFP_KERNEL);
        if (!flow_attr) {
                err = -ENOMEM;
                goto err_put;
        }

        flow_attr->type = kern_flow_attr->type;
        flow_attr->priority = kern_flow_attr->priority;
        flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
        flow_attr->port = kern_flow_attr->port;
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);

        kern_spec = kern_flow_attr + 1;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs &&
             cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
             cmd.flow_attr.size >=
             ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
                err = kern_spec_to_ib_spec(kern_spec, ib_spec);
                if (err)
                        goto err_free;
                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
                cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
                kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
                pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
                        i, cmd.flow_attr.size);
                err = -EINVAL;
                goto err_free;
        }
        flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
        if (IS_ERR(flow_id)) {
                err = PTR_ERR(flow_id);
                goto err_free;
        }
        flow_id->uobject = uobj;
        uobj->object = flow_id;

        err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
        if (err)
                goto destroy_flow;

        memset(&resp, 0, sizeof(resp));
        resp.flow_handle = uobj->id;

        err = ib_copy_to_udata(ucore,
                               &resp, sizeof(resp));
        if (err)
                goto err_copy;

        put_qp_read(qp);
        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->rule_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);
        kfree(flow_attr);
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return 0;
err_copy:
        idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
        ib_destroy_flow(flow_id);
err_free:
        kfree(flow_attr);
err_put:
        put_qp_read(qp);
err_uobj:
        put_uobj_write(uobj);
err_free_attr:
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              struct ib_udata *ucore,
                              struct ib_udata *uhw)
{
        struct ib_uverbs_destroy_flow cmd;
        struct ib_flow               *flow_id;
        struct ib_uobject            *uobj;
        int                           ret;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EINVAL;

        uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;
        flow_id = uobj->object;

        ret = ib_destroy_flow(flow_id);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return ret;
}
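
/*
 * Common worker for SRQ creation.  Basic SRQs only need a PD; XRC SRQs
 * additionally pin an XRCD (whose refcnt is raised for the SRQ's
 * lifetime) and a CQ.  Both ib_uverbs_create_srq() and
 * ib_uverbs_create_xsrq() funnel into this function.
 */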
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_device *ib_dev,
                                struct ib_uverbs_create_xsrq *cmd,
                                struct ib_udata *udata)
{
        struct ib_uverbs_create_srq_resp resp;
        struct ib_usrq_object           *obj;
        struct ib_pd                    *pd;
        struct ib_srq                   *srq;
        struct ib_uobject               *uninitialized_var(xrcd_uobj);
        struct ib_srq_init_attr          attr;
        int ret;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        if (cmd->srq_type == IB_SRQT_XRC) {
                attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
                if (!attr.ext.xrc.xrcd) {
                        ret = -EINVAL;
                        goto err;
                }

                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
                atomic_inc(&obj->uxrcd->refcnt);

                attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
                if (!attr.ext.xrc.cq) {
                        ret = -EINVAL;
                        goto err_put_xrcd;
                }
        }

        pd = idr_read_pd(cmd->pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_put_cq;
        }

        attr.event_handler  = ib_uverbs_srq_event_handler;
        attr.srq_context    = file;
        attr.srq_type       = cmd->srq_type;
        attr.attr.max_wr    = cmd->max_wr;
        attr.attr.max_sge   = cmd->max_sge;
        attr.attr.srq_limit = cmd->srq_limit;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);

        srq = pd->device->create_srq(pd, &attr, udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_put;
        }

        srq->device        = pd->device;
        srq->pd            = pd;
        srq->srq_type      = cmd->srq_type;
        srq->uobject       = &obj->uevent.uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context   = attr.srq_context;

        if (cmd->srq_type == IB_SRQT_XRC) {
                srq->ext.xrc.cq   = attr.ext.xrc.cq;
                srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
                atomic_inc(&attr.ext.xrc.cq->usecnt);
                atomic_inc(&attr.ext.xrc.xrcd->usecnt);
        }

        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        obj->uevent.uobject.object = srq;
        ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.srq_handle = obj->uevent.uobject.id;
        resp.max_wr     = attr.attr.max_wr;
        resp.max_sge    = attr.attr.max_sge;
        if (cmd->srq_type == IB_SRQT_XRC)
                resp.srqn = srq->ext.xrc.srq_num;

        if (copy_to_user((void __user *) (unsigned long) cmd->response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (cmd->srq_type == IB_SRQT_XRC) {
                put_uobj_read(xrcd_uobj);
                put_cq_read(attr.ext.xrc.cq);
        }
        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return 0;

err_copy:
        idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_srq(srq);

err_put:
        put_pd_read(pd);

err_put_cq:
        if (cmd->srq_type == IB_SRQT_XRC)
                put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
        if (cmd->srq_type == IB_SRQT_XRC) {
                atomic_dec(&obj->uxrcd->refcnt);
                put_uobj_read(xrcd_uobj);
        }

err:
        put_uobj_write(&obj->uevent.uobject);
        return ret;
}
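
/*
 * The legacy create_srq command is translated into an extended xcmd with
 * srq_type fixed to IB_SRQT_BASIC, so only the xsrq path above needs to
 * know about the different SRQ flavours.
 */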
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_create_srq      cmd;
        struct ib_uverbs_create_xsrq     xcmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata                  udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        xcmd.response    = cmd.response;
        xcmd.user_handle = cmd.user_handle;
        xcmd.srq_type    = IB_SRQT_BASIC;
        xcmd.pd_handle   = cmd.pd_handle;
        xcmd.max_wr      = cmd.max_wr;
        xcmd.max_sge     = cmd.max_sge;
        xcmd.srq_limit   = cmd.srq_limit;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
                   out_len - sizeof resp);

        ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
        if (ret)
                return ret;

        return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_create_xsrq     cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata                  udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
                   out_len - sizeof resp);

        ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
        if (ret)
                return ret;

        return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                             struct ib_device *ib_dev,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_udata             udata;
        struct ib_srq              *srq;
        struct ib_srq_attr          attr;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
                   out_len);

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        attr.max_wr    = cmd.max_wr;
        attr.srq_limit = cmd.srq_limit;

        ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

        put_srq_read(srq);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
                            struct ib_device *ib_dev,
                            const char __user *buf,
                            int in_len, int out_len)
{
        struct ib_uverbs_query_srq      cmd;
        struct ib_uverbs_query_srq_resp resp;
        struct ib_srq_attr              attr;
        struct ib_srq                  *srq;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        ret = ib_query_srq(srq, &attr);

        put_srq_read(srq);

        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.max_wr    = attr.max_wr;
        resp.max_sge   = attr.max_sge;
        resp.srq_limit = attr.srq_limit;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_destroy_srq      cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_uobject                *uobj;
        struct ib_srq                    *srq;
        struct ib_uevent_object          *obj;
        int                               ret = -EINVAL;
        struct ib_usrq_object            *us;
        enum ib_srq_type                  srq_type;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        srq = uobj->object;
        obj = container_of(uobj, struct ib_uevent_object, uobject);
        srq_type = srq->srq_type;

        ret = ib_destroy_srq(srq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        if (srq_type == IB_SRQT_XRC) {
                us = container_of(obj, struct ib_usrq_object, uevent);
                atomic_dec(&us->uxrcd->refcnt);
        }

        idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, obj);

        memset(&resp, 0, sizeof resp);
        resp.events_reported = obj->events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

        return ret ? ret : in_len;
}
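
/*
 * Extended query_device fills the response incrementally: each optional
 * block (odp_caps, timestamp_mask, hca_core_clock, ...) is appended only
 * if the caller's output buffer can hold it, and response_length grows
 * accordingly, so old userspace sees exactly the prefix it asked for.
 */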
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              struct ib_udata *ucore,
                              struct ib_udata *uhw)
{
        struct ib_uverbs_ex_query_device_resp resp = { {0} };
        struct ib_uverbs_ex_query_device  cmd;
        struct ib_device_attr attr = {0};
        int err;

        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;

        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
        if (err)
                return err;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        resp.response_length = offsetof(typeof(resp), odp_caps);

        if (ucore->outlen < resp.response_length)
                return -ENOSPC;

        err = ib_dev->query_device(ib_dev, &attr, uhw);
        if (err)
                return err;

        copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

        if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
                goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        resp.odp_caps.general_caps = attr.odp_caps.general_caps;
        resp.odp_caps.per_transport_caps.rc_odp_caps =
                attr.odp_caps.per_transport_caps.rc_odp_caps;
        resp.odp_caps.per_transport_caps.uc_odp_caps =
                attr.odp_caps.per_transport_caps.uc_odp_caps;
        resp.odp_caps.per_transport_caps.ud_odp_caps =
                attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
        resp.response_length += sizeof(resp.odp_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
                goto end;

        resp.timestamp_mask = attr.timestamp_mask;
        resp.response_length += sizeof(resp.timestamp_mask);

        if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
                goto end;

        resp.hca_core_clock = attr.hca_core_clock;
        resp.response_length += sizeof(resp.hca_core_clock);

        if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
                goto end;

        resp.device_cap_flags_ex = attr.device_cap_flags;
        resp.response_length += sizeof(resp.device_cap_flags_ex);

        if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
                goto end;

        resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
        resp.rss_caps.max_rwq_indirection_tables =
                attr.rss_caps.max_rwq_indirection_tables;
        resp.rss_caps.max_rwq_indirection_table_size =
                attr.rss_caps.max_rwq_indirection_table_size;

        resp.response_length += sizeof(resp.rss_caps);

        if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
                goto end;

        resp.max_wq_type_rq = attr.max_wq_type_rq;
        resp.response_length += sizeof(resp.max_wq_type_rq);

end:
        err = ib_copy_to_udata(ucore, &resp, resp.response_length