/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		/* Reject WRs with more SGEs than the SRQ was created with. */
		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		/* The ring is full when advancing head would hit tail. */
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
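/*
 * Illustrative sketch (not part of the driver): how a kernel consumer might
 * post a single receive buffer to this SRQ through the core verbs layer,
 * which dispatches to ipath_post_srq_receive().  The names buf, buf_len,
 * dma_addr, lkey and srq below are hypothetical caller-side values.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,	// DMA address of the receive buffer
 *		.length = buf_len,	// length of the buffer
 *		.lkey   = lkey,		// local key of the registered MR
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (u64) buf,	// returned in the completion
 *		.sg_list = &sge,
 *		.num_sge = 1,		// must be <= srq->rq.max_sge
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int err = ib_post_srq_recv(srq, &wr, &bad_wr);
 *
 * One slot of the ring is always left empty so that head == tail means
 * "empty" and (head + 1) % size == tail means "full".
 */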
/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: not used by the InfiniPath verbs driver
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct ipath_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		struct ipath_mmap_info *ip;
		__u64 offset = (__u64) srq->rq.wq;
		int err;

		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_wq;
		}

		/* Allocate info for ipath_mmap(). */
		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
		if (!ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}
		srq->ip = ip;
		ip->context = ibpd->uobject->context;
		ip->obj = srq->rq.wq;
		kref_init(&ip->ref);
		ip->mmap_cnt = 0;
		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
				      srq->rq.size * sz);
		spin_lock_irq(&dev->pending_lock);
		ip->next = dev->pending_mmaps;
		dev->pending_mmaps = ip;
		spin_unlock_irq(&dev->pending_lock);
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_wq;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	ret = &srq->ibsrq;
	goto done;

bail_wq:
	vfree(srq->rq.wq);

bail_srq:
	kfree(srq);

done:
	return ret;
}
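/*
 * Note on the udata handshake above: when the SRQ is created on behalf of a
 * user process, the kernel virtual address of the RWQ is copied back to
 * userspace as an opaque 64-bit "offset".  The user library (ipathverbs.so)
 * is then expected to pass that value as the offset argument of mmap() on
 * the verbs device file; ipath_mmap() matches it against dev->pending_mmaps
 * and maps the vmalloc'ed ring so head/tail updates are shared directly with
 * userspace.  A hypothetical user-side call would look roughly like:
 *
 *	wq = mmap(NULL, rwq_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  cmd_fd, offset_from_udata);
 *
 * where rwq_size and offset_from_udata come from the create response.
 */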
/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwq *wq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/*
		 * Return the address of the RWQ as the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = (__u64) wq;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret) {
				vfree(wq);
				goto bail;
			}
			udata->outbuf = (void __user *) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret) {
				vfree(wq);
				goto bail;
			}
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		/* The new ring must be able to hold the outstanding WQEs. */
		if (size <= n) {
			spin_unlock_irq(&srq->rq.lock);
			vfree(wq);
			ret = -EINVAL;
			goto bail;
		}
		n = 0;
		p = wq->wq;
		/* Copy the not-yet-consumed WQEs into the new ring. */
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);

			ip->obj = wq;
			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
					      size * sz);
			spin_lock_irq(&dev->pending_lock);
			ip->next = dev->pending_mmaps;
			dev->pending_mmaps = ip;
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}

bail:
	return ret;
}
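/*
 * Resize semantics (sketch of the logic above): when IB_SRQ_MAX_WR is set,
 * a new ring of attr->max_wr + 1 slots is allocated and every WQE still
 * outstanding between tail and head is copied into it, so receives posted
 * before the resize remain valid.  The resize is refused with -EINVAL if
 * the new ring could not hold the outstanding WQEs, e.g.:
 *
 *	old size = 8, head = 5, tail = 1  =>  n = 4 outstanding WQEs,
 *	so the new size (attr->max_wr + 1) must be greater than 4.
 */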
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

	/* One slot of the ring is reserved, so report size - 1. */
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;

	return 0;
}
/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		/*
		 * The RWQ may still be mmapped by userspace; drop our
		 * reference and let ipath_release_mmap_info() free it
		 * when the last mapping goes away.
		 */
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}