/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_rwq *wq;
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
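                /*
                 * One ring entry is deliberately left unused: if head
                 * were allowed to advance onto tail, a full ring would
                 * be indistinguishable from an empty one (head == tail).
                 */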
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
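
/*
 * A minimal usage sketch (hypothetical caller; real consumers go through
 * ib_post_srq_recv()).  The buffer "buf", its length "len", the memory
 * region "mr", and the SRQ pointer are assumptions, not part of this file:
 *
 *	struct ib_sge sge = {
 *		.addr   = (u64) buf,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = 1,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ipath_post_srq_receive(srq, &wr, &bad_wr))
 *		... the WR pointed to by bad_wr was not posted ...
 */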

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
                                struct ib_srq_init_attr *srq_init_attr,
                                struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibpd->device);
        struct ipath_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_wr == 0) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
            (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct ipath_rwqe);
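        /*
         * Layout: one struct ipath_rwq header (the head/tail indices)
         * followed by rq.size fixed-size WQEs, each with room for
         * max_sge SGEs.  The "+ 1" above pays for the ring entry that
         * is always left unused.
         */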
        srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
        if (!srq->rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                struct ipath_mmap_info *ip;
                __u64 offset = (__u64) srq->rq.wq;
                int err;

                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_wq;
                }

                /* Allocate info for ipath_mmap(). */
                ip = kmalloc(sizeof(*ip), GFP_KERNEL);
                if (!ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }
                srq->ip = ip;
                ip->context = ibpd->uobject->context;
                ip->obj = srq->rq.wq;
                kref_init(&ip->ref);
                ip->mmap_cnt = 0;
                ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
                                      srq->rq.size * sz);
                spin_lock_irq(&dev->pending_lock);
                ip->next = dev->pending_mmaps;
                dev->pending_mmaps = ip;
                spin_unlock_irq(&dev->pending_lock);
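                /*
                 * The info struct now sits on dev->pending_mmaps until
                 * userspace calls mmap() with the offset returned above;
                 * ipath_mmap() matches the offset against this list.
                 */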
        } else
                srq->ip = NULL;

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.wq->head = 0;
        srq->rq.wq->tail = 0;
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_wq;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

        ret = &srq->ibsrq;
        goto done;

bail_wq:
        vfree(srq->rq.wq);

bail_srq:
        kfree(srq);

done:
        return ret;
}
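
/*
 * A minimal creation sketch (hypothetical caller; normally this is
 * reached through ib_create_srq()).  The protection domain "pd" is an
 * assumption, and udata is NULL as it would be for a kernel consumer:
 *
 *	struct ib_srq_init_attr init = {
 *		.attr = { .max_wr = 128, .max_sge = 2, .srq_limit = 0 },
 *	};
 *	struct ib_srq *srq = ipath_create_srq(pd, &init, NULL);
 *
 *	if (IS_ERR(srq))
 *		... PTR_ERR(srq) is the errno ...
 */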

/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask,
                     struct ib_udata *udata)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct ipath_rwq *owq;
                struct ipath_rwq *wq;
                struct ipath_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct ipath_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                /*
                 * Return the address of the RWQ as the offset to mmap.
                 * See ipath_mmap() for details.
                 */
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = (__u64) wq;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret) {
                                vfree(wq);
                                goto bail;
                        }
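                        /*
                         * Userspace passed in the address at which it
                         * wants the new mmap offset written back;
                         * redirect the udata output buffer there before
                         * copying the offset out.
                         */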
                        udata->outbuf = (void __user *) offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret) {
                                vfree(wq);
                                goto bail;
                        }
                }

                spin_lock_irq(&srq->rq.lock);
                /*
                 * Validate the head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                owq = srq->rq.wq;
                head = owq->head;
                if (head >= srq->rq.size)
                        head = 0;
                tail = owq->tail;
                if (tail >= srq->rq.size)
                        tail = 0;
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
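                /*
                 * n is the number of WQEs still queued in the old ring;
                 * the new ring must be strictly larger so that all of
                 * them plus the one always-unused entry fit.
                 */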
                if (size <= n) {
                        spin_unlock_irq(&srq->rq.lock);
                        vfree(wq);
                        ret = -EINVAL;
                        goto bail;
                }
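                /*
                 * Copy the surviving WQEs from the old ring into the
                 * new one, compacting them so they start at index 0.
                 */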
                n = 0;
                p = wq->wq;
                while (tail != head) {
                        struct ipath_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct ipath_rwqe *)((char *) p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
                srq->rq.wq = wq;
                srq->rq.size = size;
                wq->head = n;
                wq->tail = 0;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);

                vfree(owq);

                if (srq->ip) {
                        struct ipath_mmap_info *ip = srq->ip;
                        struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);

                        ip->obj = wq;
                        ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
                                              size * sz);
                        spin_lock_irq(&dev->pending_lock);
                        ip->next = dev->pending_mmaps;
                        dev->pending_mmaps = ip;
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irq(&srq->rq.lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);
        }

bail:
        return ret;
}

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

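        /* One ring entry is always left unused, so usable WRs = size - 1. */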
        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_ibdev *dev = to_idev(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
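        /*
         * If the RWQ was handed to userspace via mmap, its memory is
         * owned by the ipath_mmap_info kref; ipath_release_mmap_info()
         * frees it once the last mapping goes away.  Otherwise free it
         * directly here.
         */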
        if (srq->ip)
                kref_put(&srq->ip->ref, ipath_release_mmap_info);
        else
                vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}