/*
 * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	/* No support for kernel clients. */
	return -EOPNOTSUPP;
}

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}

/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!(pd->uobject && udata)) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(pd->uobject->context,
				ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

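	/*
	 * Issue CREATE_SRQ to the device: nchunks and pdir_dma describe
	 * where the ring buffer lives in guest memory.
	 */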
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

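	/*
	 * Drop the creation reference, then wait for any remaining
	 * holders to drop theirs before freeing the memory underneath
	 * them.
	 */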
	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	kfree(srq);

	atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);

	return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support SRQ limit. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);

	return ret;
}