/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"

/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;
		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}
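
		/*
		 * The receive queue is a ring: one slot is always left
		 * unused, so head == tail means "empty" and advancing head
		 * onto tail means "full".
		 */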
		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
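
/*
 * Usage sketch (not part of this driver): a kernel consumer reaches the
 * handler above through the core verbs entry point ib_post_srq_recv().
 * The variables dma_addr, len, lkey and srq below are illustrative only.
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *	int err = ib_post_srq_recv(srq, &wr, &bad_wr);
 */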

/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibpd->device);
	struct qib_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		ret = ERR_PTR(-ENOSYS);
		goto done;
	}

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}
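
	/*
	 * srq->rq.size includes one slot beyond max_wr so the ring can tell
	 * "full" from "empty"; vmalloc_user() returns zeroed, page-aligned
	 * memory that can later be mapped into user space.
	 */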
	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct qib_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

		srq->ip =
		    qib_create_mmap_info(dev, s, ibpd->uobject->context,
					 srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;
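
	/* Account for this SRQ against the per-device limit. */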
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
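		/*
		 * Copy any WQEs still outstanding in the old ring to the
		 * start of the new ring while the queue lock is held.
		 */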
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
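		/*
		 * srq->limit is the low-water mark for the SRQ limit
		 * asynchronous event; it must be smaller than the queue size.
		 */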
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct qib_srq *srq = to_isrq(ibsrq);

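	/* One ring slot is always left unused, hence size - 1 usable WRs. */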
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * qib_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int qib_destroy_srq(struct ib_srq *ibsrq)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
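	/*
	 * A user-mapped ring is freed when its last mmap reference is
	 * dropped; a kernel-only ring can be freed directly.
	 */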
	if (srq->ip)
		kref_put(&srq->ip->ref, qib_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}