/* drivers/infiniband/hw/mlx4/cq.c */

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

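/*
 * CQE ownership: the ring holds ibcq.cqe + 1 entries (a power of two),
 * so n & (ibcq.cqe + 1) extracts the wrap-around bit of index n.  A CQE
 * belongs to software only while the owner bit that hardware wrote into
 * owner_sr_opcode matches that wrap bit; otherwise the entry is still
 * owned by hardware and get_sw_cqe() returns NULL.
 */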
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

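/*
 * Kernel-owned CQ buffers are set up in three steps: allocate the
 * buffer itself, allocate an MTT (memory translation table) for its
 * pages, then write the page list into the MTT so the HCA can address
 * the buffer.  Each error label unwinds exactly the steps that had
 * already succeeded.
 */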
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

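/*
 * The requested depth is rounded up to a power of two after adding one
 * spare entry (so the ring can never be completely full), and ibcq.cqe
 * reports the usable depth, i.e. ring size - 1.  For userspace CQs the
 * buffer and doorbell record are supplied by the caller through udata;
 * for kernel CQs they are allocated here.
 */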
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

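/*
 * Resize helpers.  A pending resize is parked in cq->resize_buf (plus
 * cq->resize_umem for userspace CQs) until hardware switches buffers;
 * only one resize may be outstanding at a time, hence the -EBUSY
 * checks.
 */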
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}

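/*
 * Copy the CQEs that arrived while a resize was in flight from the old
 * buffer into the new one, stopping at the special RESIZE CQE that
 * hardware leaves behind once it has switched buffers.  The owner bit
 * of each copied entry is rewritten to match the wrap state of its slot
 * in the new ring.
 */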
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

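/*
 * A receive checksum is trusted only for a non-fragmented IPv4 packet
 * with no IP options (IPV4 and IPOK set, IPV4F/IPV4OPT/IPV6 clear)
 * carrying TCP or UDP, and only when the hardware-computed checksum is
 * the all-ones value 0xffff.
 */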
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4    |
				      MLX4_CQE_STATUS_IPV4F   |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6    |
				      MLX4_CQE_STATUS_IPOK))  ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

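/*
 * Poll a single CQE.  Returns 0 when a work completion was filled in,
 * -EAGAIN when the CQ is empty, and -EINVAL for a malformed entry.
 * *cur_qp caches the QP of the previous CQE so that the QP table lookup
 * can be skipped when consecutive completions belong to the same QP.
 */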
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
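			/* fall through: an RDMA write with immediate is still an RDMA write */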
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
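			/* fall through: a send with immediate is reported as IB_WC_SEND */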
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
		if (rdma_port_get_link_layer(wc->qp->device,
				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}

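/*
 * Poll up to num_entries completions while holding the CQ lock.
 * -EAGAIN from mlx4_ib_poll_one() just means the queue drained early,
 * so it is folded into a successful (possibly zero) npolled return; the
 * consumer index is handed to hardware only if at least one CQE was
 * consumed.
 */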
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

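/*
 * Request a completion notification: IB_CQ_SOLICITED arms the CQ for
 * solicited events only, any other flag for the next completion.  The
 * request goes out through the device's UAR doorbell page.
 */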
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}