/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
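		/*
		 * The ring is considered full when advancing the head would
		 * make it equal to the tail, so one WQE slot always remains
		 * unused.
		 */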
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct ipath_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}
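
	/*
	 * The RWQ is allocated with vmalloc_user() so that, for a userspace
	 * SRQ, the ring can be mapped directly into the owning process via
	 * mmap() (see ipath_mmap()) rather than copied through the kernel.
	 */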

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

		srq->ip =
		    ipath_create_mmap_info(dev, s,
					   ibpd->uobject->context,
					   srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);
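
	/*
	 * For a user SRQ, queue the mmap info on the device's pending list
	 * so that a subsequent mmap() of the returned offset can find it.
	 */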
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
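			/*
			 * Resizing reallocates the RWQ, so the library passes
			 * in the user address where the new mmap offset should
			 * be written; verify it is writable before committing.
			 */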
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
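		/*
		 * Copy the outstanding WQEs from the old ring into the new
		 * one, compacting them to start at index 0; 'n' counts how
		 * many were carried over and becomes the new head index.
		 */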
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ipath_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See ipath_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
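		/*
		 * Only the SRQ limit is changing.  Per the IB spec, the limit
		 * is a watermark: once the number of posted receive WQEs drops
		 * below it, an SRQ limit asynchronous event is generated and
		 * the limit is disarmed.  It must be smaller than the ring
		 * size.
		 */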
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

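	/* One WQE slot in the ring is always unused, hence size - 1. */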
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;

	return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
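
	/*
	 * For a user SRQ the ring is owned by the mmap info; dropping the
	 * last reference (via ipath_release_mmap_info()) frees it once any
	 * user mapping is gone.  Otherwise the ring can be freed directly.
	 */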
	if (srq->ip)
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}