/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"
/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}
/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the SRQ being created
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	u32 sz;
	int ret;
	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return -EINVAL;
	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
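	/*
	 * sz is the stride of one WQE: a fixed rvt_rwqe header followed by
	 * max_sge scatter/gather entries.  The ring gets one slot more than
	 * max_wr so that head == tail always means "empty", never "full".
	 */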
	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
			 dev->dparms.node, udata)) {
		ret = -ENOMEM;
		goto bail_srq;
	}
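	/*
	 * At this point rvt_alloc_rq() has set up srq->rq.kwq (the
	 * kernel-owned queue bookkeeping) and, for a user SRQ, srq->rq.wq
	 * (the user-mappable buffer handed to userspace via mmap below).
	 */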
	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
		if (IS_ERR(srq->ip)) {
			ret = PTR_ERR(srq->ip);
			goto bail_wq;
		}

		ret = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (ret)
			goto bail_ip;
	}
	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;
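	/*
	 * A non-zero srq->limit arms the SRQ low-watermark asynchronous
	 * event (IB_EVENT_SRQ_LIMIT_REACHED), raised when the number of
	 * posted WQEs drains below this limit.
	 */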
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = -ENOMEM;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);
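	/* For a user SRQ, publish the mmap info so rvt_mmap() can find it. */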
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return 0;
bail_ip:
	kfree(srq->ip);
bail_wq:
	rvt_free_rq(&srq->rq);
bail_srq:
	return ret;
}
/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rq tmp_rq = {};
	int ret = 0;
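	/*
	 * Resizing builds a complete replacement ring in tmp_rq, copies the
	 * outstanding WQEs across while holding c_lock, and only then swaps
	 * the new ring into the SRQ, so posters never see a half-built queue.
	 */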
	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_krwq *okwq = NULL;
		struct rvt_rwq *owq = NULL;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;
		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;
		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
				 udata))
			return -ENOMEM;
		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			/* point outbuf at the user-supplied address */
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}
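		/*
		 * From here on the old ring must not move underneath us:
		 * take c_lock, snapshot the producer/consumer indices, and
		 * copy the live WQEs into the new ring.
		 */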
		spin_lock_irq(&srq->rq.kwq->c_lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		if (udata) {
			owq = srq->rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(owq->head);
			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
		} else {
			okwq = srq->rq.kwq;
			head = okwq->head;
			tail = okwq->tail;
		}
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
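		/*
		 * Count the WQEs still outstanding in the old ring,
		 * handling wraparound: e.g. size = 16, tail = 12, head = 2
		 * leaves n = 2 + (16 - 12) = 6 entries, and the new ring
		 * must be strictly larger than that.
		 */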
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = tmp_rq.kwq->curr_wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			/* WQEs are sz bytes apart, not a plain C array */
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.kwq = tmp_rq.kwq;
		if (udata) {
			srq->rq.wq = tmp_rq.wq;
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
		} else {
			tmp_rq.kwq->head = n;
			tmp_rq.kwq->tail = 0;
		}
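		/*
		 * The copied WQEs now occupy slots 0..n-1 of the new ring,
		 * hence head = n (next free slot) and tail = 0 (next WQE to
		 * consume).
		 */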
		srq->rq.size = size;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);

		vfree(owq);
		kvfree(okwq);
		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);
			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}
			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.kwq->c_lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
	rvt_free_rq(&tmp_rq);
	return ret;
}
/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	/* report max_wr without the extra ring slot added at create time */
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}
/**
 * rvt_destroy_srq - destroy a shared receive queue
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	kvfree(srq->rq.kwq);
	return 0;
}