drivers/infiniband/hw/mlx4/srq.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
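
/*
 * SRQ work queue entries (WQEs) are a power-of-two size (1 << wqe_shift
 * bytes), so a WQE's byte offset within the SRQ buffer is simply its
 * index shifted left by wqe_shift.
 */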
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;
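
	/*
	 * Illustrative sizing example (values hypothetical, not from the
	 * original source): mlx4_wqe_srq_next_seg and mlx4_wqe_data_seg
	 * are each 16 bytes, so max_sge = 3 gives desc_size =
	 * roundup_pow_of_two(16 + 3 * 16) = 64 and wqe_shift = 6, while
	 * max_wr = 100 rounds msrq.max up to 128, for buf_size =
	 * 128 * 64 = 8192 bytes.
	 */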
	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
				   GFP_KERNEL)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}
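
		/*
		 * The next_wqe_index links initialized above thread the
		 * WQEs into a circular free list: head is the next WQE
		 * handed out by post_srq_recv(), tail is the last free
		 * WQE, and head == tail is treated as "full", which is
		 * why max_wr is reported as msrq.max - 1.
		 */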
		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;
err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
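
/*
 * Illustrative only (hypothetical limit value): a consumer reaches
 * mlx4_ib_modify_srq() below through the core verbs call, e.g.
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ret = ib_modify_srq(ibsrq, &attr, IB_SRQ_LIMIT);
 *
 * which arms a one-shot IB_EVENT_SRQ_LIMIT_REACHED event, delivered by
 * mlx4_ib_srq_event() once the number of posted WQEs drops below the
 * limit.
 */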
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

	return 0;
}
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}
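
/*
 * Called from the CQ completion path (hence with interrupts already
 * disabled): the completed WQE is linked back in after the current
 * tail of the free list so mlx4_ib_post_srq_recv() can hand it out
 * again.
 */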
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
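
/*
 * Illustrative only (hypothetical buffer, lkey, and wr_id values): a
 * consumer posts receive buffers through the core verbs API, which
 * lands in mlx4_ib_post_srq_recv() above:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */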