2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
37 #include <linux/file.h>
40 #include <asm/uaccess.h>
/*
 * Fill in an ib_udata describing the command's trailing input bytes and
 * the userspace response buffer.  Wrapped in do { } while (0) so it is
 * safe as a single statement (e.g. in an un-braced if).
 */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)
52 ssize_t
ib_uverbs_get_context(struct ib_uverbs_file
*file
,
53 const char __user
*buf
,
54 int in_len
, int out_len
)
56 struct ib_uverbs_get_context cmd
;
57 struct ib_uverbs_get_context_resp resp
;
58 struct ib_udata udata
;
59 struct ib_device
*ibdev
= file
->device
->ib_dev
;
60 struct ib_ucontext
*ucontext
;
64 if (out_len
< sizeof resp
)
67 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
77 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
78 (unsigned long) cmd
.response
+ sizeof resp
,
79 in_len
- sizeof cmd
, out_len
- sizeof resp
);
81 ucontext
= ibdev
->alloc_ucontext(ibdev
, &udata
);
83 return PTR_ERR(file
->ucontext
);
85 ucontext
->device
= ibdev
;
86 INIT_LIST_HEAD(&ucontext
->pd_list
);
87 INIT_LIST_HEAD(&ucontext
->mr_list
);
88 INIT_LIST_HEAD(&ucontext
->mw_list
);
89 INIT_LIST_HEAD(&ucontext
->cq_list
);
90 INIT_LIST_HEAD(&ucontext
->qp_list
);
91 INIT_LIST_HEAD(&ucontext
->srq_list
);
92 INIT_LIST_HEAD(&ucontext
->ah_list
);
94 resp
.num_comp_vectors
= file
->device
->num_comp_vectors
;
96 filp
= ib_uverbs_alloc_event_file(file
, 1, &resp
.async_fd
);
102 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
103 &resp
, sizeof resp
)) {
108 file
->async_file
= filp
->private_data
;
110 INIT_IB_EVENT_HANDLER(&file
->event_handler
, file
->device
->ib_dev
,
111 ib_uverbs_event_handler
);
112 ret
= ib_register_event_handler(&file
->event_handler
);
116 kref_get(&file
->async_file
->ref
);
117 kref_get(&file
->ref
);
118 file
->ucontext
= ucontext
;
120 fd_install(resp
.async_fd
, filp
);
127 put_unused_fd(resp
.async_fd
);
131 ibdev
->dealloc_ucontext(ucontext
);
138 ssize_t
ib_uverbs_query_device(struct ib_uverbs_file
*file
,
139 const char __user
*buf
,
140 int in_len
, int out_len
)
142 struct ib_uverbs_query_device cmd
;
143 struct ib_uverbs_query_device_resp resp
;
144 struct ib_device_attr attr
;
147 if (out_len
< sizeof resp
)
150 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
153 ret
= ib_query_device(file
->device
->ib_dev
, &attr
);
157 memset(&resp
, 0, sizeof resp
);
159 resp
.fw_ver
= attr
.fw_ver
;
160 resp
.node_guid
= attr
.node_guid
;
161 resp
.sys_image_guid
= attr
.sys_image_guid
;
162 resp
.max_mr_size
= attr
.max_mr_size
;
163 resp
.page_size_cap
= attr
.page_size_cap
;
164 resp
.vendor_id
= attr
.vendor_id
;
165 resp
.vendor_part_id
= attr
.vendor_part_id
;
166 resp
.hw_ver
= attr
.hw_ver
;
167 resp
.max_qp
= attr
.max_qp
;
168 resp
.max_qp_wr
= attr
.max_qp_wr
;
169 resp
.device_cap_flags
= attr
.device_cap_flags
;
170 resp
.max_sge
= attr
.max_sge
;
171 resp
.max_sge_rd
= attr
.max_sge_rd
;
172 resp
.max_cq
= attr
.max_cq
;
173 resp
.max_cqe
= attr
.max_cqe
;
174 resp
.max_mr
= attr
.max_mr
;
175 resp
.max_pd
= attr
.max_pd
;
176 resp
.max_qp_rd_atom
= attr
.max_qp_rd_atom
;
177 resp
.max_ee_rd_atom
= attr
.max_ee_rd_atom
;
178 resp
.max_res_rd_atom
= attr
.max_res_rd_atom
;
179 resp
.max_qp_init_rd_atom
= attr
.max_qp_init_rd_atom
;
180 resp
.max_ee_init_rd_atom
= attr
.max_ee_init_rd_atom
;
181 resp
.atomic_cap
= attr
.atomic_cap
;
182 resp
.max_ee
= attr
.max_ee
;
183 resp
.max_rdd
= attr
.max_rdd
;
184 resp
.max_mw
= attr
.max_mw
;
185 resp
.max_raw_ipv6_qp
= attr
.max_raw_ipv6_qp
;
186 resp
.max_raw_ethy_qp
= attr
.max_raw_ethy_qp
;
187 resp
.max_mcast_grp
= attr
.max_mcast_grp
;
188 resp
.max_mcast_qp_attach
= attr
.max_mcast_qp_attach
;
189 resp
.max_total_mcast_qp_attach
= attr
.max_total_mcast_qp_attach
;
190 resp
.max_ah
= attr
.max_ah
;
191 resp
.max_fmr
= attr
.max_fmr
;
192 resp
.max_map_per_fmr
= attr
.max_map_per_fmr
;
193 resp
.max_srq
= attr
.max_srq
;
194 resp
.max_srq_wr
= attr
.max_srq_wr
;
195 resp
.max_srq_sge
= attr
.max_srq_sge
;
196 resp
.max_pkeys
= attr
.max_pkeys
;
197 resp
.local_ca_ack_delay
= attr
.local_ca_ack_delay
;
198 resp
.phys_port_cnt
= file
->device
->ib_dev
->phys_port_cnt
;
200 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
207 ssize_t
ib_uverbs_query_port(struct ib_uverbs_file
*file
,
208 const char __user
*buf
,
209 int in_len
, int out_len
)
211 struct ib_uverbs_query_port cmd
;
212 struct ib_uverbs_query_port_resp resp
;
213 struct ib_port_attr attr
;
216 if (out_len
< sizeof resp
)
219 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
222 ret
= ib_query_port(file
->device
->ib_dev
, cmd
.port_num
, &attr
);
226 memset(&resp
, 0, sizeof resp
);
228 resp
.state
= attr
.state
;
229 resp
.max_mtu
= attr
.max_mtu
;
230 resp
.active_mtu
= attr
.active_mtu
;
231 resp
.gid_tbl_len
= attr
.gid_tbl_len
;
232 resp
.port_cap_flags
= attr
.port_cap_flags
;
233 resp
.max_msg_sz
= attr
.max_msg_sz
;
234 resp
.bad_pkey_cntr
= attr
.bad_pkey_cntr
;
235 resp
.qkey_viol_cntr
= attr
.qkey_viol_cntr
;
236 resp
.pkey_tbl_len
= attr
.pkey_tbl_len
;
238 resp
.sm_lid
= attr
.sm_lid
;
240 resp
.max_vl_num
= attr
.max_vl_num
;
241 resp
.sm_sl
= attr
.sm_sl
;
242 resp
.subnet_timeout
= attr
.subnet_timeout
;
243 resp
.init_type_reply
= attr
.init_type_reply
;
244 resp
.active_width
= attr
.active_width
;
245 resp
.active_speed
= attr
.active_speed
;
246 resp
.phys_state
= attr
.phys_state
;
248 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
255 ssize_t
ib_uverbs_alloc_pd(struct ib_uverbs_file
*file
,
256 const char __user
*buf
,
257 int in_len
, int out_len
)
259 struct ib_uverbs_alloc_pd cmd
;
260 struct ib_uverbs_alloc_pd_resp resp
;
261 struct ib_udata udata
;
262 struct ib_uobject
*uobj
;
266 if (out_len
< sizeof resp
)
269 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
272 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
273 (unsigned long) cmd
.response
+ sizeof resp
,
274 in_len
- sizeof cmd
, out_len
- sizeof resp
);
276 uobj
= kmalloc(sizeof *uobj
, GFP_KERNEL
);
280 uobj
->context
= file
->ucontext
;
282 pd
= file
->device
->ib_dev
->alloc_pd(file
->device
->ib_dev
,
283 file
->ucontext
, &udata
);
289 pd
->device
= file
->device
->ib_dev
;
291 atomic_set(&pd
->usecnt
, 0);
293 down(&ib_uverbs_idr_mutex
);
296 if (!idr_pre_get(&ib_uverbs_pd_idr
, GFP_KERNEL
)) {
301 ret
= idr_get_new(&ib_uverbs_pd_idr
, pd
, &uobj
->id
);
308 memset(&resp
, 0, sizeof resp
);
309 resp
.pd_handle
= uobj
->id
;
311 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
312 &resp
, sizeof resp
)) {
318 list_add_tail(&uobj
->list
, &file
->ucontext
->pd_list
);
321 up(&ib_uverbs_idr_mutex
);
326 idr_remove(&ib_uverbs_pd_idr
, uobj
->id
);
329 up(&ib_uverbs_idr_mutex
);
337 ssize_t
ib_uverbs_dealloc_pd(struct ib_uverbs_file
*file
,
338 const char __user
*buf
,
339 int in_len
, int out_len
)
341 struct ib_uverbs_dealloc_pd cmd
;
343 struct ib_uobject
*uobj
;
346 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
349 down(&ib_uverbs_idr_mutex
);
351 pd
= idr_find(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
352 if (!pd
|| pd
->uobject
->context
!= file
->ucontext
)
357 ret
= ib_dealloc_pd(pd
);
361 idr_remove(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
364 list_del(&uobj
->list
);
370 up(&ib_uverbs_idr_mutex
);
372 return ret
? ret
: in_len
;
375 ssize_t
ib_uverbs_reg_mr(struct ib_uverbs_file
*file
,
376 const char __user
*buf
, int in_len
,
379 struct ib_uverbs_reg_mr cmd
;
380 struct ib_uverbs_reg_mr_resp resp
;
381 struct ib_udata udata
;
382 struct ib_umem_object
*obj
;
387 if (out_len
< sizeof resp
)
390 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
393 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
394 (unsigned long) cmd
.response
+ sizeof resp
,
395 in_len
- sizeof cmd
, out_len
- sizeof resp
);
397 if ((cmd
.start
& ~PAGE_MASK
) != (cmd
.hca_va
& ~PAGE_MASK
))
401 * Local write permission is required if remote write or
402 * remote atomic permission is also requested.
404 if (cmd
.access_flags
& (IB_ACCESS_REMOTE_ATOMIC
| IB_ACCESS_REMOTE_WRITE
) &&
405 !(cmd
.access_flags
& IB_ACCESS_LOCAL_WRITE
))
408 obj
= kmalloc(sizeof *obj
, GFP_KERNEL
);
412 obj
->uobject
.context
= file
->ucontext
;
415 * We ask for writable memory if any access flags other than
416 * "remote read" are set. "Local write" and "remote write"
417 * obviously require write access. "Remote atomic" can do
418 * things like fetch and add, which will modify memory, and
419 * "MW bind" can change permissions by binding a window.
421 ret
= ib_umem_get(file
->device
->ib_dev
, &obj
->umem
,
422 (void *) (unsigned long) cmd
.start
, cmd
.length
,
423 !!(cmd
.access_flags
& ~IB_ACCESS_REMOTE_READ
));
427 obj
->umem
.virt_base
= cmd
.hca_va
;
429 down(&ib_uverbs_idr_mutex
);
431 pd
= idr_find(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
432 if (!pd
|| pd
->uobject
->context
!= file
->ucontext
) {
437 if (!pd
->device
->reg_user_mr
) {
442 mr
= pd
->device
->reg_user_mr(pd
, &obj
->umem
, cmd
.access_flags
, &udata
);
448 mr
->device
= pd
->device
;
450 mr
->uobject
= &obj
->uobject
;
451 atomic_inc(&pd
->usecnt
);
452 atomic_set(&mr
->usecnt
, 0);
454 memset(&resp
, 0, sizeof resp
);
455 resp
.lkey
= mr
->lkey
;
456 resp
.rkey
= mr
->rkey
;
459 if (!idr_pre_get(&ib_uverbs_mr_idr
, GFP_KERNEL
)) {
464 ret
= idr_get_new(&ib_uverbs_mr_idr
, mr
, &obj
->uobject
.id
);
471 resp
.mr_handle
= obj
->uobject
.id
;
473 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
474 &resp
, sizeof resp
)) {
480 list_add_tail(&obj
->uobject
.list
, &file
->ucontext
->mr_list
);
483 up(&ib_uverbs_idr_mutex
);
488 idr_remove(&ib_uverbs_mr_idr
, obj
->uobject
.id
);
494 up(&ib_uverbs_idr_mutex
);
496 ib_umem_release(file
->device
->ib_dev
, &obj
->umem
);
503 ssize_t
ib_uverbs_dereg_mr(struct ib_uverbs_file
*file
,
504 const char __user
*buf
, int in_len
,
507 struct ib_uverbs_dereg_mr cmd
;
509 struct ib_umem_object
*memobj
;
512 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
515 down(&ib_uverbs_idr_mutex
);
517 mr
= idr_find(&ib_uverbs_mr_idr
, cmd
.mr_handle
);
518 if (!mr
|| mr
->uobject
->context
!= file
->ucontext
)
521 memobj
= container_of(mr
->uobject
, struct ib_umem_object
, uobject
);
523 ret
= ib_dereg_mr(mr
);
527 idr_remove(&ib_uverbs_mr_idr
, cmd
.mr_handle
);
530 list_del(&memobj
->uobject
.list
);
533 ib_umem_release(file
->device
->ib_dev
, &memobj
->umem
);
537 up(&ib_uverbs_idr_mutex
);
539 return ret
? ret
: in_len
;
542 ssize_t
ib_uverbs_create_comp_channel(struct ib_uverbs_file
*file
,
543 const char __user
*buf
, int in_len
,
546 struct ib_uverbs_create_comp_channel cmd
;
547 struct ib_uverbs_create_comp_channel_resp resp
;
550 if (out_len
< sizeof resp
)
553 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
556 filp
= ib_uverbs_alloc_event_file(file
, 0, &resp
.fd
);
558 return PTR_ERR(filp
);
560 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
561 &resp
, sizeof resp
)) {
562 put_unused_fd(resp
.fd
);
567 fd_install(resp
.fd
, filp
);
571 ssize_t
ib_uverbs_create_cq(struct ib_uverbs_file
*file
,
572 const char __user
*buf
, int in_len
,
575 struct ib_uverbs_create_cq cmd
;
576 struct ib_uverbs_create_cq_resp resp
;
577 struct ib_udata udata
;
578 struct ib_ucq_object
*uobj
;
579 struct ib_uverbs_event_file
*ev_file
= NULL
;
583 if (out_len
< sizeof resp
)
586 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
589 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
590 (unsigned long) cmd
.response
+ sizeof resp
,
591 in_len
- sizeof cmd
, out_len
- sizeof resp
);
593 if (cmd
.comp_vector
>= file
->device
->num_comp_vectors
)
596 if (cmd
.comp_channel
>= 0)
597 ev_file
= ib_uverbs_lookup_comp_file(cmd
.comp_channel
);
599 uobj
= kmalloc(sizeof *uobj
, GFP_KERNEL
);
603 uobj
->uobject
.user_handle
= cmd
.user_handle
;
604 uobj
->uobject
.context
= file
->ucontext
;
605 uobj
->uverbs_file
= file
;
606 uobj
->comp_events_reported
= 0;
607 uobj
->async_events_reported
= 0;
608 INIT_LIST_HEAD(&uobj
->comp_list
);
609 INIT_LIST_HEAD(&uobj
->async_list
);
611 cq
= file
->device
->ib_dev
->create_cq(file
->device
->ib_dev
, cmd
.cqe
,
612 file
->ucontext
, &udata
);
618 cq
->device
= file
->device
->ib_dev
;
619 cq
->uobject
= &uobj
->uobject
;
620 cq
->comp_handler
= ib_uverbs_comp_handler
;
621 cq
->event_handler
= ib_uverbs_cq_event_handler
;
622 cq
->cq_context
= ev_file
;
623 atomic_set(&cq
->usecnt
, 0);
625 down(&ib_uverbs_idr_mutex
);
628 if (!idr_pre_get(&ib_uverbs_cq_idr
, GFP_KERNEL
)) {
633 ret
= idr_get_new(&ib_uverbs_cq_idr
, cq
, &uobj
->uobject
.id
);
640 memset(&resp
, 0, sizeof resp
);
641 resp
.cq_handle
= uobj
->uobject
.id
;
644 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
645 &resp
, sizeof resp
)) {
651 list_add_tail(&uobj
->uobject
.list
, &file
->ucontext
->cq_list
);
654 up(&ib_uverbs_idr_mutex
);
659 idr_remove(&ib_uverbs_cq_idr
, uobj
->uobject
.id
);
662 up(&ib_uverbs_idr_mutex
);
670 ssize_t
ib_uverbs_poll_cq(struct ib_uverbs_file
*file
,
671 const char __user
*buf
, int in_len
,
674 struct ib_uverbs_poll_cq cmd
;
675 struct ib_uverbs_poll_cq_resp
*resp
;
682 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
685 wc
= kmalloc(cmd
.ne
* sizeof *wc
, GFP_KERNEL
);
689 rsize
= sizeof *resp
+ cmd
.ne
* sizeof(struct ib_uverbs_wc
);
690 resp
= kmalloc(rsize
, GFP_KERNEL
);
696 down(&ib_uverbs_idr_mutex
);
697 cq
= idr_find(&ib_uverbs_cq_idr
, cmd
.cq_handle
);
698 if (!cq
|| cq
->uobject
->context
!= file
->ucontext
) {
703 resp
->count
= ib_poll_cq(cq
, cmd
.ne
, wc
);
705 for (i
= 0; i
< resp
->count
; i
++) {
706 resp
->wc
[i
].wr_id
= wc
[i
].wr_id
;
707 resp
->wc
[i
].status
= wc
[i
].status
;
708 resp
->wc
[i
].opcode
= wc
[i
].opcode
;
709 resp
->wc
[i
].vendor_err
= wc
[i
].vendor_err
;
710 resp
->wc
[i
].byte_len
= wc
[i
].byte_len
;
711 resp
->wc
[i
].imm_data
= (__u32 __force
) wc
[i
].imm_data
;
712 resp
->wc
[i
].qp_num
= wc
[i
].qp_num
;
713 resp
->wc
[i
].src_qp
= wc
[i
].src_qp
;
714 resp
->wc
[i
].wc_flags
= wc
[i
].wc_flags
;
715 resp
->wc
[i
].pkey_index
= wc
[i
].pkey_index
;
716 resp
->wc
[i
].slid
= wc
[i
].slid
;
717 resp
->wc
[i
].sl
= wc
[i
].sl
;
718 resp
->wc
[i
].dlid_path_bits
= wc
[i
].dlid_path_bits
;
719 resp
->wc
[i
].port_num
= wc
[i
].port_num
;
722 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
, resp
, rsize
))
726 up(&ib_uverbs_idr_mutex
);
731 return ret
? ret
: in_len
;
734 ssize_t
ib_uverbs_req_notify_cq(struct ib_uverbs_file
*file
,
735 const char __user
*buf
, int in_len
,
738 struct ib_uverbs_req_notify_cq cmd
;
742 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
745 down(&ib_uverbs_idr_mutex
);
746 cq
= idr_find(&ib_uverbs_cq_idr
, cmd
.cq_handle
);
747 if (cq
&& cq
->uobject
->context
== file
->ucontext
) {
748 ib_req_notify_cq(cq
, cmd
.solicited_only
?
749 IB_CQ_SOLICITED
: IB_CQ_NEXT_COMP
);
752 up(&ib_uverbs_idr_mutex
);
757 ssize_t
ib_uverbs_destroy_cq(struct ib_uverbs_file
*file
,
758 const char __user
*buf
, int in_len
,
761 struct ib_uverbs_destroy_cq cmd
;
762 struct ib_uverbs_destroy_cq_resp resp
;
764 struct ib_ucq_object
*uobj
;
765 struct ib_uverbs_event_file
*ev_file
;
769 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
772 memset(&resp
, 0, sizeof resp
);
774 down(&ib_uverbs_idr_mutex
);
776 cq
= idr_find(&ib_uverbs_cq_idr
, cmd
.cq_handle
);
777 if (!cq
|| cq
->uobject
->context
!= file
->ucontext
)
780 user_handle
= cq
->uobject
->user_handle
;
781 uobj
= container_of(cq
->uobject
, struct ib_ucq_object
, uobject
);
782 ev_file
= cq
->cq_context
;
784 ret
= ib_destroy_cq(cq
);
788 idr_remove(&ib_uverbs_cq_idr
, cmd
.cq_handle
);
791 list_del(&uobj
->uobject
.list
);
794 ib_uverbs_release_ucq(file
, ev_file
, uobj
);
796 resp
.comp_events_reported
= uobj
->comp_events_reported
;
797 resp
.async_events_reported
= uobj
->async_events_reported
;
801 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
806 up(&ib_uverbs_idr_mutex
);
808 return ret
? ret
: in_len
;
811 ssize_t
ib_uverbs_create_qp(struct ib_uverbs_file
*file
,
812 const char __user
*buf
, int in_len
,
815 struct ib_uverbs_create_qp cmd
;
816 struct ib_uverbs_create_qp_resp resp
;
817 struct ib_udata udata
;
818 struct ib_uevent_object
*uobj
;
820 struct ib_cq
*scq
, *rcq
;
823 struct ib_qp_init_attr attr
;
826 if (out_len
< sizeof resp
)
829 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
832 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
833 (unsigned long) cmd
.response
+ sizeof resp
,
834 in_len
- sizeof cmd
, out_len
- sizeof resp
);
836 uobj
= kmalloc(sizeof *uobj
, GFP_KERNEL
);
840 down(&ib_uverbs_idr_mutex
);
842 pd
= idr_find(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
843 scq
= idr_find(&ib_uverbs_cq_idr
, cmd
.send_cq_handle
);
844 rcq
= idr_find(&ib_uverbs_cq_idr
, cmd
.recv_cq_handle
);
845 srq
= cmd
.is_srq
? idr_find(&ib_uverbs_srq_idr
, cmd
.srq_handle
) : NULL
;
847 if (!pd
|| pd
->uobject
->context
!= file
->ucontext
||
848 !scq
|| scq
->uobject
->context
!= file
->ucontext
||
849 !rcq
|| rcq
->uobject
->context
!= file
->ucontext
||
850 (cmd
.is_srq
&& (!srq
|| srq
->uobject
->context
!= file
->ucontext
))) {
855 attr
.event_handler
= ib_uverbs_qp_event_handler
;
856 attr
.qp_context
= file
;
860 attr
.sq_sig_type
= cmd
.sq_sig_all
? IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
861 attr
.qp_type
= cmd
.qp_type
;
863 attr
.cap
.max_send_wr
= cmd
.max_send_wr
;
864 attr
.cap
.max_recv_wr
= cmd
.max_recv_wr
;
865 attr
.cap
.max_send_sge
= cmd
.max_send_sge
;
866 attr
.cap
.max_recv_sge
= cmd
.max_recv_sge
;
867 attr
.cap
.max_inline_data
= cmd
.max_inline_data
;
869 uobj
->uobject
.user_handle
= cmd
.user_handle
;
870 uobj
->uobject
.context
= file
->ucontext
;
871 uobj
->events_reported
= 0;
872 INIT_LIST_HEAD(&uobj
->event_list
);
874 qp
= pd
->device
->create_qp(pd
, &attr
, &udata
);
880 qp
->device
= pd
->device
;
882 qp
->send_cq
= attr
.send_cq
;
883 qp
->recv_cq
= attr
.recv_cq
;
885 qp
->uobject
= &uobj
->uobject
;
886 qp
->event_handler
= attr
.event_handler
;
887 qp
->qp_context
= attr
.qp_context
;
888 qp
->qp_type
= attr
.qp_type
;
889 atomic_inc(&pd
->usecnt
);
890 atomic_inc(&attr
.send_cq
->usecnt
);
891 atomic_inc(&attr
.recv_cq
->usecnt
);
893 atomic_inc(&attr
.srq
->usecnt
);
895 memset(&resp
, 0, sizeof resp
);
896 resp
.qpn
= qp
->qp_num
;
899 if (!idr_pre_get(&ib_uverbs_qp_idr
, GFP_KERNEL
)) {
904 ret
= idr_get_new(&ib_uverbs_qp_idr
, qp
, &uobj
->uobject
.id
);
911 resp
.qp_handle
= uobj
->uobject
.id
;
912 resp
.max_recv_sge
= attr
.cap
.max_recv_sge
;
913 resp
.max_send_sge
= attr
.cap
.max_send_sge
;
914 resp
.max_recv_wr
= attr
.cap
.max_recv_wr
;
915 resp
.max_send_wr
= attr
.cap
.max_send_wr
;
916 resp
.max_inline_data
= attr
.cap
.max_inline_data
;
918 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
919 &resp
, sizeof resp
)) {
925 list_add_tail(&uobj
->uobject
.list
, &file
->ucontext
->qp_list
);
928 up(&ib_uverbs_idr_mutex
);
933 idr_remove(&ib_uverbs_qp_idr
, uobj
->uobject
.id
);
939 up(&ib_uverbs_idr_mutex
);
945 ssize_t
ib_uverbs_modify_qp(struct ib_uverbs_file
*file
,
946 const char __user
*buf
, int in_len
,
949 struct ib_uverbs_modify_qp cmd
;
951 struct ib_qp_attr
*attr
;
954 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
957 attr
= kmalloc(sizeof *attr
, GFP_KERNEL
);
961 down(&ib_uverbs_idr_mutex
);
963 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
964 if (!qp
|| qp
->uobject
->context
!= file
->ucontext
) {
969 attr
->qp_state
= cmd
.qp_state
;
970 attr
->cur_qp_state
= cmd
.cur_qp_state
;
971 attr
->path_mtu
= cmd
.path_mtu
;
972 attr
->path_mig_state
= cmd
.path_mig_state
;
973 attr
->qkey
= cmd
.qkey
;
974 attr
->rq_psn
= cmd
.rq_psn
;
975 attr
->sq_psn
= cmd
.sq_psn
;
976 attr
->dest_qp_num
= cmd
.dest_qp_num
;
977 attr
->qp_access_flags
= cmd
.qp_access_flags
;
978 attr
->pkey_index
= cmd
.pkey_index
;
979 attr
->alt_pkey_index
= cmd
.pkey_index
;
980 attr
->en_sqd_async_notify
= cmd
.en_sqd_async_notify
;
981 attr
->max_rd_atomic
= cmd
.max_rd_atomic
;
982 attr
->max_dest_rd_atomic
= cmd
.max_dest_rd_atomic
;
983 attr
->min_rnr_timer
= cmd
.min_rnr_timer
;
984 attr
->port_num
= cmd
.port_num
;
985 attr
->timeout
= cmd
.timeout
;
986 attr
->retry_cnt
= cmd
.retry_cnt
;
987 attr
->rnr_retry
= cmd
.rnr_retry
;
988 attr
->alt_port_num
= cmd
.alt_port_num
;
989 attr
->alt_timeout
= cmd
.alt_timeout
;
991 memcpy(attr
->ah_attr
.grh
.dgid
.raw
, cmd
.dest
.dgid
, 16);
992 attr
->ah_attr
.grh
.flow_label
= cmd
.dest
.flow_label
;
993 attr
->ah_attr
.grh
.sgid_index
= cmd
.dest
.sgid_index
;
994 attr
->ah_attr
.grh
.hop_limit
= cmd
.dest
.hop_limit
;
995 attr
->ah_attr
.grh
.traffic_class
= cmd
.dest
.traffic_class
;
996 attr
->ah_attr
.dlid
= cmd
.dest
.dlid
;
997 attr
->ah_attr
.sl
= cmd
.dest
.sl
;
998 attr
->ah_attr
.src_path_bits
= cmd
.dest
.src_path_bits
;
999 attr
->ah_attr
.static_rate
= cmd
.dest
.static_rate
;
1000 attr
->ah_attr
.ah_flags
= cmd
.dest
.is_global
? IB_AH_GRH
: 0;
1001 attr
->ah_attr
.port_num
= cmd
.dest
.port_num
;
1003 memcpy(attr
->alt_ah_attr
.grh
.dgid
.raw
, cmd
.alt_dest
.dgid
, 16);
1004 attr
->alt_ah_attr
.grh
.flow_label
= cmd
.alt_dest
.flow_label
;
1005 attr
->alt_ah_attr
.grh
.sgid_index
= cmd
.alt_dest
.sgid_index
;
1006 attr
->alt_ah_attr
.grh
.hop_limit
= cmd
.alt_dest
.hop_limit
;
1007 attr
->alt_ah_attr
.grh
.traffic_class
= cmd
.alt_dest
.traffic_class
;
1008 attr
->alt_ah_attr
.dlid
= cmd
.alt_dest
.dlid
;
1009 attr
->alt_ah_attr
.sl
= cmd
.alt_dest
.sl
;
1010 attr
->alt_ah_attr
.src_path_bits
= cmd
.alt_dest
.src_path_bits
;
1011 attr
->alt_ah_attr
.static_rate
= cmd
.alt_dest
.static_rate
;
1012 attr
->alt_ah_attr
.ah_flags
= cmd
.alt_dest
.is_global
? IB_AH_GRH
: 0;
1013 attr
->alt_ah_attr
.port_num
= cmd
.alt_dest
.port_num
;
1015 ret
= ib_modify_qp(qp
, attr
, cmd
.attr_mask
);
1022 up(&ib_uverbs_idr_mutex
);
1028 ssize_t
ib_uverbs_destroy_qp(struct ib_uverbs_file
*file
,
1029 const char __user
*buf
, int in_len
,
1032 struct ib_uverbs_destroy_qp cmd
;
1033 struct ib_uverbs_destroy_qp_resp resp
;
1035 struct ib_uevent_object
*uobj
;
1038 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1041 memset(&resp
, 0, sizeof resp
);
1043 down(&ib_uverbs_idr_mutex
);
1045 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1046 if (!qp
|| qp
->uobject
->context
!= file
->ucontext
)
1049 uobj
= container_of(qp
->uobject
, struct ib_uevent_object
, uobject
);
1051 ret
= ib_destroy_qp(qp
);
1055 idr_remove(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1058 list_del(&uobj
->uobject
.list
);
1061 ib_uverbs_release_uevent(file
, uobj
);
1063 resp
.events_reported
= uobj
->events_reported
;
1067 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1068 &resp
, sizeof resp
))
1072 up(&ib_uverbs_idr_mutex
);
1074 return ret
? ret
: in_len
;
1077 ssize_t
ib_uverbs_post_send(struct ib_uverbs_file
*file
,
1078 const char __user
*buf
, int in_len
,
1081 struct ib_uverbs_post_send cmd
;
1082 struct ib_uverbs_post_send_resp resp
;
1083 struct ib_uverbs_send_wr
*user_wr
;
1084 struct ib_send_wr
*wr
= NULL
, *last
, *next
, *bad_wr
;
1087 ssize_t ret
= -EINVAL
;
1089 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1092 if (in_len
< sizeof cmd
+ cmd
.wqe_size
* cmd
.wr_count
+
1093 cmd
.sge_count
* sizeof (struct ib_uverbs_sge
))
1096 if (cmd
.wqe_size
< sizeof (struct ib_uverbs_send_wr
))
1099 user_wr
= kmalloc(cmd
.wqe_size
, GFP_KERNEL
);
1103 down(&ib_uverbs_idr_mutex
);
1105 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1106 if (!qp
|| qp
->uobject
->context
!= file
->ucontext
)
1111 for (i
= 0; i
< cmd
.wr_count
; ++i
) {
1112 if (copy_from_user(user_wr
,
1113 buf
+ sizeof cmd
+ i
* cmd
.wqe_size
,
1119 if (user_wr
->num_sge
+ sg_ind
> cmd
.sge_count
) {
1124 next
= kmalloc(ALIGN(sizeof *next
, sizeof (struct ib_sge
)) +
1125 user_wr
->num_sge
* sizeof (struct ib_sge
),
1139 next
->wr_id
= user_wr
->wr_id
;
1140 next
->num_sge
= user_wr
->num_sge
;
1141 next
->opcode
= user_wr
->opcode
;
1142 next
->send_flags
= user_wr
->send_flags
;
1143 next
->imm_data
= (__be32 __force
) user_wr
->imm_data
;
1145 if (qp
->qp_type
== IB_QPT_UD
) {
1146 next
->wr
.ud
.ah
= idr_find(&ib_uverbs_ah_idr
,
1148 if (!next
->wr
.ud
.ah
) {
1152 next
->wr
.ud
.remote_qpn
= user_wr
->wr
.ud
.remote_qpn
;
1153 next
->wr
.ud
.remote_qkey
= user_wr
->wr
.ud
.remote_qkey
;
1155 switch (next
->opcode
) {
1156 case IB_WR_RDMA_WRITE
:
1157 case IB_WR_RDMA_WRITE_WITH_IMM
:
1158 case IB_WR_RDMA_READ
:
1159 next
->wr
.rdma
.remote_addr
=
1160 user_wr
->wr
.rdma
.remote_addr
;
1161 next
->wr
.rdma
.rkey
=
1162 user_wr
->wr
.rdma
.rkey
;
1164 case IB_WR_ATOMIC_CMP_AND_SWP
:
1165 case IB_WR_ATOMIC_FETCH_AND_ADD
:
1166 next
->wr
.atomic
.remote_addr
=
1167 user_wr
->wr
.atomic
.remote_addr
;
1168 next
->wr
.atomic
.compare_add
=
1169 user_wr
->wr
.atomic
.compare_add
;
1170 next
->wr
.atomic
.swap
= user_wr
->wr
.atomic
.swap
;
1171 next
->wr
.atomic
.rkey
= user_wr
->wr
.atomic
.rkey
;
1178 if (next
->num_sge
) {
1179 next
->sg_list
= (void *) next
+
1180 ALIGN(sizeof *next
, sizeof (struct ib_sge
));
1181 if (copy_from_user(next
->sg_list
,
1183 cmd
.wr_count
* cmd
.wqe_size
+
1184 sg_ind
* sizeof (struct ib_sge
),
1185 next
->num_sge
* sizeof (struct ib_sge
))) {
1189 sg_ind
+= next
->num_sge
;
1191 next
->sg_list
= NULL
;
1195 ret
= qp
->device
->post_send(qp
, wr
, &bad_wr
);
1197 for (next
= wr
; next
; next
= next
->next
) {
1203 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1204 &resp
, sizeof resp
))
1208 up(&ib_uverbs_idr_mutex
);
1218 return ret
? ret
: in_len
;
1221 static struct ib_recv_wr
*ib_uverbs_unmarshall_recv(const char __user
*buf
,
1227 struct ib_uverbs_recv_wr
*user_wr
;
1228 struct ib_recv_wr
*wr
= NULL
, *last
, *next
;
1233 if (in_len
< wqe_size
* wr_count
+
1234 sge_count
* sizeof (struct ib_uverbs_sge
))
1235 return ERR_PTR(-EINVAL
);
1237 if (wqe_size
< sizeof (struct ib_uverbs_recv_wr
))
1238 return ERR_PTR(-EINVAL
);
1240 user_wr
= kmalloc(wqe_size
, GFP_KERNEL
);
1242 return ERR_PTR(-ENOMEM
);
1246 for (i
= 0; i
< wr_count
; ++i
) {
1247 if (copy_from_user(user_wr
, buf
+ i
* wqe_size
,
1253 if (user_wr
->num_sge
+ sg_ind
> sge_count
) {
1258 next
= kmalloc(ALIGN(sizeof *next
, sizeof (struct ib_sge
)) +
1259 user_wr
->num_sge
* sizeof (struct ib_sge
),
1273 next
->wr_id
= user_wr
->wr_id
;
1274 next
->num_sge
= user_wr
->num_sge
;
1276 if (next
->num_sge
) {
1277 next
->sg_list
= (void *) next
+
1278 ALIGN(sizeof *next
, sizeof (struct ib_sge
));
1279 if (copy_from_user(next
->sg_list
,
1280 buf
+ wr_count
* wqe_size
+
1281 sg_ind
* sizeof (struct ib_sge
),
1282 next
->num_sge
* sizeof (struct ib_sge
))) {
1286 sg_ind
+= next
->num_sge
;
1288 next
->sg_list
= NULL
;
1303 return ERR_PTR(ret
);
1306 ssize_t
ib_uverbs_post_recv(struct ib_uverbs_file
*file
,
1307 const char __user
*buf
, int in_len
,
1310 struct ib_uverbs_post_recv cmd
;
1311 struct ib_uverbs_post_recv_resp resp
;
1312 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
1314 ssize_t ret
= -EINVAL
;
1316 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1319 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
1320 in_len
- sizeof cmd
, cmd
.wr_count
,
1321 cmd
.sge_count
, cmd
.wqe_size
);
1325 down(&ib_uverbs_idr_mutex
);
1327 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1328 if (!qp
|| qp
->uobject
->context
!= file
->ucontext
)
1332 ret
= qp
->device
->post_recv(qp
, wr
, &bad_wr
);
1334 for (next
= wr
; next
; next
= next
->next
) {
1341 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1342 &resp
, sizeof resp
))
1346 up(&ib_uverbs_idr_mutex
);
1354 return ret
? ret
: in_len
;
1357 ssize_t
ib_uverbs_post_srq_recv(struct ib_uverbs_file
*file
,
1358 const char __user
*buf
, int in_len
,
1361 struct ib_uverbs_post_srq_recv cmd
;
1362 struct ib_uverbs_post_srq_recv_resp resp
;
1363 struct ib_recv_wr
*wr
, *next
, *bad_wr
;
1365 ssize_t ret
= -EINVAL
;
1367 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1370 wr
= ib_uverbs_unmarshall_recv(buf
+ sizeof cmd
,
1371 in_len
- sizeof cmd
, cmd
.wr_count
,
1372 cmd
.sge_count
, cmd
.wqe_size
);
1376 down(&ib_uverbs_idr_mutex
);
1378 srq
= idr_find(&ib_uverbs_srq_idr
, cmd
.srq_handle
);
1379 if (!srq
|| srq
->uobject
->context
!= file
->ucontext
)
1383 ret
= srq
->device
->post_srq_recv(srq
, wr
, &bad_wr
);
1385 for (next
= wr
; next
; next
= next
->next
) {
1392 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1393 &resp
, sizeof resp
))
1397 up(&ib_uverbs_idr_mutex
);
1405 return ret
? ret
: in_len
;
1408 ssize_t
ib_uverbs_create_ah(struct ib_uverbs_file
*file
,
1409 const char __user
*buf
, int in_len
,
1412 struct ib_uverbs_create_ah cmd
;
1413 struct ib_uverbs_create_ah_resp resp
;
1414 struct ib_uobject
*uobj
;
1417 struct ib_ah_attr attr
;
1420 if (out_len
< sizeof resp
)
1423 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1426 uobj
= kmalloc(sizeof *uobj
, GFP_KERNEL
);
1430 down(&ib_uverbs_idr_mutex
);
1432 pd
= idr_find(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
1433 if (!pd
|| pd
->uobject
->context
!= file
->ucontext
) {
1438 uobj
->user_handle
= cmd
.user_handle
;
1439 uobj
->context
= file
->ucontext
;
1441 attr
.dlid
= cmd
.attr
.dlid
;
1442 attr
.sl
= cmd
.attr
.sl
;
1443 attr
.src_path_bits
= cmd
.attr
.src_path_bits
;
1444 attr
.static_rate
= cmd
.attr
.static_rate
;
1445 attr
.port_num
= cmd
.attr
.port_num
;
1446 attr
.grh
.flow_label
= cmd
.attr
.grh
.flow_label
;
1447 attr
.grh
.sgid_index
= cmd
.attr
.grh
.sgid_index
;
1448 attr
.grh
.hop_limit
= cmd
.attr
.grh
.hop_limit
;
1449 attr
.grh
.traffic_class
= cmd
.attr
.grh
.traffic_class
;
1450 memcpy(attr
.grh
.dgid
.raw
, cmd
.attr
.grh
.dgid
, 16);
1452 ah
= ib_create_ah(pd
, &attr
);
1461 if (!idr_pre_get(&ib_uverbs_ah_idr
, GFP_KERNEL
)) {
1466 ret
= idr_get_new(&ib_uverbs_ah_idr
, ah
, &uobj
->id
);
1473 resp
.ah_handle
= uobj
->id
;
1475 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1476 &resp
, sizeof resp
)) {
1482 list_add_tail(&uobj
->list
, &file
->ucontext
->ah_list
);
1485 up(&ib_uverbs_idr_mutex
);
1490 idr_remove(&ib_uverbs_ah_idr
, uobj
->id
);
1496 up(&ib_uverbs_idr_mutex
);
1502 ssize_t
ib_uverbs_destroy_ah(struct ib_uverbs_file
*file
,
1503 const char __user
*buf
, int in_len
, int out_len
)
1505 struct ib_uverbs_destroy_ah cmd
;
1507 struct ib_uobject
*uobj
;
1510 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1513 down(&ib_uverbs_idr_mutex
);
1515 ah
= idr_find(&ib_uverbs_ah_idr
, cmd
.ah_handle
);
1516 if (!ah
|| ah
->uobject
->context
!= file
->ucontext
)
1521 ret
= ib_destroy_ah(ah
);
1525 idr_remove(&ib_uverbs_ah_idr
, cmd
.ah_handle
);
1528 list_del(&uobj
->list
);
1534 up(&ib_uverbs_idr_mutex
);
1536 return ret
? ret
: in_len
;
1539 ssize_t
ib_uverbs_attach_mcast(struct ib_uverbs_file
*file
,
1540 const char __user
*buf
, int in_len
,
1543 struct ib_uverbs_attach_mcast cmd
;
1547 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1550 down(&ib_uverbs_idr_mutex
);
1552 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1553 if (qp
&& qp
->uobject
->context
== file
->ucontext
)
1554 ret
= ib_attach_mcast(qp
, (union ib_gid
*) cmd
.gid
, cmd
.mlid
);
1556 up(&ib_uverbs_idr_mutex
);
1558 return ret
? ret
: in_len
;
1561 ssize_t
ib_uverbs_detach_mcast(struct ib_uverbs_file
*file
,
1562 const char __user
*buf
, int in_len
,
1565 struct ib_uverbs_detach_mcast cmd
;
1569 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1572 down(&ib_uverbs_idr_mutex
);
1574 qp
= idr_find(&ib_uverbs_qp_idr
, cmd
.qp_handle
);
1575 if (qp
&& qp
->uobject
->context
== file
->ucontext
)
1576 ret
= ib_detach_mcast(qp
, (union ib_gid
*) cmd
.gid
, cmd
.mlid
);
1578 up(&ib_uverbs_idr_mutex
);
1580 return ret
? ret
: in_len
;
1583 ssize_t
ib_uverbs_create_srq(struct ib_uverbs_file
*file
,
1584 const char __user
*buf
, int in_len
,
1587 struct ib_uverbs_create_srq cmd
;
1588 struct ib_uverbs_create_srq_resp resp
;
1589 struct ib_udata udata
;
1590 struct ib_uevent_object
*uobj
;
1593 struct ib_srq_init_attr attr
;
1596 if (out_len
< sizeof resp
)
1599 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1602 INIT_UDATA(&udata
, buf
+ sizeof cmd
,
1603 (unsigned long) cmd
.response
+ sizeof resp
,
1604 in_len
- sizeof cmd
, out_len
- sizeof resp
);
1606 uobj
= kmalloc(sizeof *uobj
, GFP_KERNEL
);
1610 down(&ib_uverbs_idr_mutex
);
1612 pd
= idr_find(&ib_uverbs_pd_idr
, cmd
.pd_handle
);
1614 if (!pd
|| pd
->uobject
->context
!= file
->ucontext
) {
1619 attr
.event_handler
= ib_uverbs_srq_event_handler
;
1620 attr
.srq_context
= file
;
1621 attr
.attr
.max_wr
= cmd
.max_wr
;
1622 attr
.attr
.max_sge
= cmd
.max_sge
;
1623 attr
.attr
.srq_limit
= cmd
.srq_limit
;
1625 uobj
->uobject
.user_handle
= cmd
.user_handle
;
1626 uobj
->uobject
.context
= file
->ucontext
;
1627 uobj
->events_reported
= 0;
1628 INIT_LIST_HEAD(&uobj
->event_list
);
1630 srq
= pd
->device
->create_srq(pd
, &attr
, &udata
);
1636 srq
->device
= pd
->device
;
1638 srq
->uobject
= &uobj
->uobject
;
1639 srq
->event_handler
= attr
.event_handler
;
1640 srq
->srq_context
= attr
.srq_context
;
1641 atomic_inc(&pd
->usecnt
);
1642 atomic_set(&srq
->usecnt
, 0);
1644 memset(&resp
, 0, sizeof resp
);
1647 if (!idr_pre_get(&ib_uverbs_srq_idr
, GFP_KERNEL
)) {
1652 ret
= idr_get_new(&ib_uverbs_srq_idr
, srq
, &uobj
->uobject
.id
);
1659 resp
.srq_handle
= uobj
->uobject
.id
;
1661 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1662 &resp
, sizeof resp
)) {
1668 list_add_tail(&uobj
->uobject
.list
, &file
->ucontext
->srq_list
);
1671 up(&ib_uverbs_idr_mutex
);
1676 idr_remove(&ib_uverbs_srq_idr
, uobj
->uobject
.id
);
1679 ib_destroy_srq(srq
);
1682 up(&ib_uverbs_idr_mutex
);
1688 ssize_t
ib_uverbs_modify_srq(struct ib_uverbs_file
*file
,
1689 const char __user
*buf
, int in_len
,
1692 struct ib_uverbs_modify_srq cmd
;
1694 struct ib_srq_attr attr
;
1697 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1700 down(&ib_uverbs_idr_mutex
);
1702 srq
= idr_find(&ib_uverbs_srq_idr
, cmd
.srq_handle
);
1703 if (!srq
|| srq
->uobject
->context
!= file
->ucontext
) {
1708 attr
.max_wr
= cmd
.max_wr
;
1709 attr
.srq_limit
= cmd
.srq_limit
;
1711 ret
= ib_modify_srq(srq
, &attr
, cmd
.attr_mask
);
1714 up(&ib_uverbs_idr_mutex
);
1716 return ret
? ret
: in_len
;
1719 ssize_t
ib_uverbs_destroy_srq(struct ib_uverbs_file
*file
,
1720 const char __user
*buf
, int in_len
,
1723 struct ib_uverbs_destroy_srq cmd
;
1724 struct ib_uverbs_destroy_srq_resp resp
;
1726 struct ib_uevent_object
*uobj
;
1729 if (copy_from_user(&cmd
, buf
, sizeof cmd
))
1732 down(&ib_uverbs_idr_mutex
);
1734 memset(&resp
, 0, sizeof resp
);
1736 srq
= idr_find(&ib_uverbs_srq_idr
, cmd
.srq_handle
);
1737 if (!srq
|| srq
->uobject
->context
!= file
->ucontext
)
1740 uobj
= container_of(srq
->uobject
, struct ib_uevent_object
, uobject
);
1742 ret
= ib_destroy_srq(srq
);
1746 idr_remove(&ib_uverbs_srq_idr
, cmd
.srq_handle
);
1749 list_del(&uobj
->uobject
.list
);
1752 ib_uverbs_release_uevent(file
, uobj
);
1754 resp
.events_reported
= uobj
->events_reported
;
1758 if (copy_to_user((void __user
*) (unsigned long) cmd
.response
,
1759 &resp
, sizeof resp
))
1763 up(&ib_uverbs_idr_mutex
);
1765 return ret
? ret
: in_len
;