/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "srq.h"
#include "vt.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}
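
/*
 * Note: drivers are not expected to call this directly; rdmavt runs it
 * as part of device registration (rvt_register_device()), so the SRQ
 * accounting fields are initialized before any SRQ can be created.
 */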

/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: Allocated srq object
 */
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
	struct rvt_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return ERR_PTR(-EINVAL);

	srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
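	/*
	 * The ring is sized one larger than max_wr: keeping one slot empty
	 * lets head == tail unambiguously mean "queue empty", and
	 * rvt_query_srq() reports size - 1 back to the consumer.
	 */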
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	srq->rq.wq = udata ?
		vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
		vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
			     dev->dparms.node);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

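	/*
	 * Note the allocator split above: vmalloc_user() returns zeroed
	 * memory that may later be mapped into user space, while
	 * kernel-only SRQs use vzalloc_node() to keep the ring on the
	 * device's NUMA node.
	 */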
	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip =
			rvt_create_mmap_info(dev, s, ibpd->uobject->context,
					     srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}
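	/*
	 * The offset copied back through udata is the token user space
	 * later hands to mmap(2); rvt_mmap() (mmap.c) matches it against
	 * the rvt_mmap_info entries queued on dev->pending_mmaps.
	 */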

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return &srq->ibsrq;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
	return ret;
}
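
/*
 * Example (illustrative, not part of this file): a verbs consumer
 * reaches rvt_create_srq() through ib_create_srq(); the bounds checked
 * at the top of the function correspond to the init attributes:
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.srq_type = IB_SRQT_BASIC,
 *		.attr = {
 *			.max_wr  = 128,	(1 .. dparms.props.max_srq_wr)
 *			.max_sge = 4,	(1 .. dparms.props.max_srq_sge)
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */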

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_rwq *owq;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;

		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = udata ?
			vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
			vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
				     dev->dparms.node);
		if (!wq)
			return -ENOMEM;

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}

		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
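		/*
		 * Worked example: with size 8, head 2, tail 5 there are
		 * n = 2 + (8 - 5) = 5 pending WQEs (slots 5, 6, 7, 0, 1),
		 * so the resize below is rejected unless the new size
		 * exceeds n.
		 */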
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
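		/*
		 * The copy above compacts the surviving WQEs to the front
		 * of the new ring, which is why the new head is simply n
		 * and the new tail is 0.
		 */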
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
	return ret;
}
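
/*
 * Example (illustrative, not part of this file): arming the SRQ limit
 * through the verbs layer. Once the number of queued receive WQEs drops
 * below srq_limit, the consumer receives an IB_EVENT_SRQ_LIMIT_REACHED
 * async event and the limit is disarmed:
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */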

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}
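
/*
 * Note that max_wr is reported as size - 1: the extra ring slot
 * allocated in rvt_create_srq() is an implementation detail and stays
 * hidden from the consumer.
 */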

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 *
 * Return: always 0
 */
int rvt_destroy_srq(struct ib_srq *ibsrq)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}
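
/*
 * The kref_put() above is what keeps user-mapped SRQs safe: if user
 * space still has the ring mmapped, the mapping holds a reference and
 * rvt_release_mmap_info() (mmap.c) frees the ring only when the last
 * reference drops.
 */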