/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"
/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr)
{
        struct qib_srq *srq = to_isrq(ibsrq);
        struct qib_rwq *wq;
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct qib_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                /* The ring is full if advancing head would land on tail. */
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibpd->device);
        struct qib_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct qib_rwqe);
        srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
        if (!srq->rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;
                u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

                srq->ip =
                        qib_create_mmap_info(dev, s, ibpd->uobject->context,
                                             srq->rq.wq);
                if (!srq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }

                err = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                srq->ip = NULL;

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.wq->head = 0;
        srq->rq.wq->tail = 0;
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == ib_qib_max_srqs) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &srq->ibsrq;
        goto done;

bail_ip:
        kfree(srq->ip);
bail_wq:
        vfree(srq->rq.wq);
bail_srq:
        kfree(srq);
done:
        return ret;
}
/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata)
{
        struct qib_srq *srq = to_isrq(ibsrq);
        struct qib_rwq *wq;
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct qib_rwq *owq;
                struct qib_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > ib_qib_max_srq_wrs) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct qib_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                /* Check that we can write the offset to mmap. */
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf =
                                (void __user *) (unsigned long) offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.lock);
                /*
                 * validate head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                owq = srq->rq.wq;
                head = owq->head;
                tail = owq->tail;
                if (head >= srq->rq.size || tail >= srq->rq.size) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
                /* Copy the outstanding WQEs into the new, larger ring. */
                n = 0;
                p = wq->wq;
                while (tail != head) {
                        struct qib_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct qib_rwqe *)((char *) p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
                srq->rq.wq = wq;
                srq->rq.size = size;
                wq->head = n;
                wq->tail = 0;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);

                vfree(owq);

                if (srq->ip) {
                        struct qib_mmap_info *ip = srq->ip;
                        struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
                        u32 s = sizeof(struct qib_rwq) + size * sz;

                        qib_update_mmap_info(dev, ip, s, wq);

                        /*
                         * Return the offset to mmap.
                         * See qib_mmap() for details.
                         */
                        if (udata && udata->inlen >= sizeof(__u64)) {
                                ret = ib_copy_to_udata(udata, &ip->offset,
                                                       sizeof(ip->offset));
                                if (ret)
                                        goto bail;
                        }

                        /*
                         * Put user mapping info onto the pending list
                         * unless it already is on the list.
                         */
                        spin_lock_irq(&dev->pending_lock);
                        if (list_empty(&ip->pending_mmaps))
                                list_add(&ip->pending_mmaps,
                                         &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irq(&srq->rq.lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);
        }
        goto bail;

bail_unlock:
        spin_unlock_irq(&srq->rq.lock);
bail_free:
        vfree(wq);
bail:
        return ret;
}
int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct qib_srq *srq = to_isrq(ibsrq);

        /* One slot is kept empty to distinguish a full ring from an empty one. */
        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}
/**
 * qib_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int qib_destroy_srq(struct ib_srq *ibsrq)
{
        struct qib_srq *srq = to_isrq(ibsrq);
        struct qib_ibdev *dev = to_idev(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
        if (srq->ip)
                kref_put(&srq->ip->ref, qib_release_mmap_info);
        else
                vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}