/*
 * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"
int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	/* No support for kernel clients. */
	return -EOPNOTSUPP;
}
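/*
 * Receive posting is a data-path operation handled entirely by the
 * pvrdma userspace provider, which writes WQEs into the mapped SRQ
 * ring; that is why the kernel verb above only returns -EOPNOTSUPP.
 * A minimal sketch of the userspace side via the standard libibverbs
 * call (buf, len, and mr are the caller's, not this driver's code):
 *
 *	struct ibv_sge sge = {
 *		.addr = (uintptr_t)buf,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ibv_recv_wr wr = {
 *		.wr_id = 1,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ibv_recv_wr *bad_wr;
 *
 *	if (ibv_post_srq_recv(srq, &wr, &bad_wr))
 *		perror("ibv_post_srq_recv");
 */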
/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}
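/*
 * The query verb above backs the standard libibverbs entry point. A
 * minimal usage sketch from user space (srq is assumed to be an
 * already created struct ibv_srq *):
 *
 *	struct ibv_srq_attr attr;
 *
 *	if (!ibv_query_srq(srq, &attr))
 *		printf("max_wr=%u srq_limit=%u\n",
 *		       attr.max_wr, attr.srq_limit);
 */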
/**
 * pvrdma_create_srq - create shared receive queue
 * @pd: protection domain
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: the ib_srq pointer on success, otherwise returns an errno.
 */
struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct pvrdma_srq *srq = NULL;
	struct pvrdma_dev *dev = to_vdev(pd->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!(pd->uobject && udata)) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return ERR_PTR(-EINVAL);
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return ERR_PTR(-ENOMEM);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = -ENOMEM;
		goto err_srq;
	}

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	/* Pin the userspace ring buffer and build its page directory. */
	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				ucmd.buf_size, 0, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	/* Publish the handle so the device's event path can find this SRQ. */
	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
	}

	return &srq->ibsrq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	kfree(srq);
	atomic_dec(&dev->num_srqs);

	return ERR_PTR(ret);
}
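/*
 * The create verb above is reached through the standard libibverbs
 * entry point. A minimal userspace sketch (pd is assumed to be an
 * allocated struct ibv_pd *; the sizes are illustrative):
 *
 *	struct ibv_srq_init_attr init_attr = {
 *		.attr = {
 *			.max_wr = 128,
 *			.max_sge = 1,
 *			.srq_limit = 0,
 *		},
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init_attr);
 *
 *	if (!srq)
 *		perror("ibv_create_srq");
 *	...
 *	ibv_destroy_srq(srq);
 */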
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Drop the creation reference and wait for any remaining users. */
	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	kfree(srq);

	atomic_dec(&dev->num_srqs);
}
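/*
 * The refcount/completion pair above is the usual kernel idiom for
 * quiescing an object before freeing it. A minimal generic sketch of
 * the pattern (obj and use() are illustrative, not driver code):
 *
 *	// at create time: one reference owned by the creator
 *	refcount_set(&obj->refcnt, 1);
 *	init_completion(&obj->free);
 *
 *	// a transient user, e.g. an event handler:
 *	if (refcount_inc_not_zero(&obj->refcnt)) {
 *		use(obj);
 *		if (refcount_dec_and_test(&obj->refcnt))
 *			complete(&obj->free);
 *	}
 *
 * The destroyer drops the creator's reference the same way, then blocks
 * in wait_for_completion() until the last user signals the completion.
 */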
/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);

	return 0;
}
/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support SRQ limit. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	return 0;
}
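/*
 * The only supported modification is arming the SRQ limit: once the
 * number of posted receives drops below srq_limit, the device raises
 * an IBV_EVENT_SRQ_LIMIT_REACHED async event. A minimal userspace
 * sketch (srq is assumed to be an existing struct ibv_srq *):
 *
 *	struct ibv_srq_attr attr = {
 *		.srq_limit = 16,	// illustrative watermark
 *	};
 *
 *	if (ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT))
 *		perror("ibv_modify_srq");
 */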