/*
 * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"
/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}
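
/*
 * Usage sketch (not part of this driver): consumers do not call this
 * entry point directly; it is dispatched through the core verbs layer.
 * A minimal, hypothetical kernel-side caller would look like:
 *
 *	struct ib_srq_attr attr;
 *	int err = ib_query_srq(ibsrq, &attr);
 *
 *	if (!err)
 *		pr_info("srq_limit=%u max_wr=%u max_sge=%u\n",
 *			attr.srq_limit, attr.max_wr, attr.max_sge);
 */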
/**
 * pvrdma_create_srq - create shared receive queue
 * @ibsrq: the IB shared receive queue
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return -EINVAL;
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return -ENOMEM;

	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

	srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_page_count(srq->umem);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq, udata);
		return -EINVAL;
	}

	return 0;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	atomic_dec(&dev->num_srqs);

	return ret;
}
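
/*
 * Usage sketch (assumption, not part of this file): a userspace client
 * reaches this path through libibverbs, whose provider library supplies
 * the buf_addr and buf_size that arrive here in @udata. A minimal,
 * hypothetical caller:
 *
 *	struct ibv_srq_init_attr attr = {
 *		.attr = { .max_wr = 64, .max_sge = 1, .srq_limit = 0 },
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &attr);
 *
 *	if (!srq)
 *		perror("ibv_create_srq");
 */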
static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	atomic_dec(&dev->num_srqs);
}
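
/*
 * Teardown idiom sketch: the SRQ starts life with refcnt == 1 (set in
 * pvrdma_create_srq() above). Any concurrent user, e.g. an event path
 * that looks the SRQ up in srq_tbl, is assumed to take and drop a
 * temporary reference like this (hypothetical user shown):
 *
 *	refcount_inc(&srq->refcnt);
 *	...use srq...
 *	if (refcount_dec_and_test(&srq->refcnt))
 *		complete(&srq->free);
 *
 * Dropping the creation reference and then blocking in
 * wait_for_completion() therefore guarantees that no user still holds
 * the SRQ when pvrdma_free_srq() releases its memory and page directory.
 */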
/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 * @udata: user data or null for kernel object
 */
void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);
}
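
/*
 * Device command pattern sketch: each verb in this file builds a request
 * in a union pvrdma_cmd_req, stamps hdr.cmd with the command code, and
 * posts it with pvrdma_cmd_post(). Passing a response union and the
 * expected response code makes the exchange synchronous, e.g.:
 *
 *	memset(cmd, 0, sizeof(*cmd));
 *	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
 *
 * Destroy-style commands pass NULL and 0 instead, since no payload
 * comes back.
 */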
/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support SRQ limit. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	return ret;
}