/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
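/*
 * A CQE belongs to software when its ownership bit, which the hardware
 * toggles on every pass around the ring, matches the wrap parity of the
 * index being polled.  With 64-byte CQEs the ownership byte lives in
 * the second 32-byte half of the entry, hence the tcqe adjustment.
 * get_sw_cqe() returns the CQE if software owns it and NULL otherwise.
 */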
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
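/*
 * Tune completion-event moderation: the HCA raises a completion event
 * once cq_count completions have accumulated or once cq_period has
 * elapsed, whichever comes first.
 */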
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
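/*
 * Pin a user-space CQ buffer and make it visible to the HCA: take a
 * reference on the user memory region, then build and populate an MTT
 * with its page list.
 */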
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
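/*
 * Create a CQ with at least 'entries' entries.  The size is rounded up
 * to a power of two, with one extra slot reserved so a completely full
 * ring can be told apart from an empty one.  User CQs take their buffer
 * and doorbell record from udata; kernel CQs allocate both here.
 *
 * Kernel consumers reach this through the verbs layer, roughly
 * (a sketch; the handler names and context are placeholders):
 *
 *	struct ib_cq *cq = ib_create_cq(ibdev, my_comp_handler,
 *					my_event_handler, my_ctx,
 *					nent, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */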
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
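/*
 * Allocate the replacement buffer for a kernel CQ resize.  Only one
 * resize may be in flight per CQ, hence the -EBUSY check.
 */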
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
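/* Count the CQEs in the ring that software has not yet consumed. */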
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}
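/*
 * Move the CQEs that arrived during a resize from the old buffer into
 * the new one.  The hardware marks the cut-over point with a special
 * RESIZE CQE; everything before it is copied, and each copied entry has
 * its ownership bit recomputed for the new ring size.
 */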
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}
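/*
 * Resize a CQ.  The new buffer is allocated first, then the firmware is
 * told to switch over with mlx4_cq_resize().  For user CQs the swap
 * completes here; for kernel CQs the poll path may already have
 * performed it when it saw the RESIZE CQE, so the swap below happens
 * under cq->lock and only if resize_buf is still pending.
 */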
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
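/* Translate a hardware error syndrome into an ib_wc completion status. */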
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
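/*
 * On a multi-function (SR-IOV) device, special QP traffic is tunnelled
 * through proxy QPs: the real completion metadata (P_Key index, SLID,
 * SL, source QP, GRH flag) arrives in a mlx4_ib_proxy_sqp_hdr ahead of
 * the payload.  Unpack that header into the work completion.
 */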
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
	wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	return 0;
}
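/*
 * Consume one CQE.  Returns 0 when a work completion has been filled
 * in, -EAGAIN when the CQ is empty, and -EINVAL on a malformed entry.
 * *cur_qp caches the QP of the previous completion so the QP lookup is
 * skipped when consecutive CQEs belong to the same QP.
 */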
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
		if (unlikely(!msrq)) {
			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
				cq->mcq.cqn, srq_num);
			return -EINVAL;
		}
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
						      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (rdma_port_get_link_layer(wc->qp->device,
					     (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}
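/*
 * Poll up to num_entries completions into wc.  Returns the number of
 * entries polled (possibly zero) or a negative errno.  A ULP typically
 * drains the CQ roughly like this (a sketch; handle() is a placeholder):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			handle(&wc[i]);
 */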
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}
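/*
 * Ring the doorbell to request a completion event, for either the next
 * solicited completion or any next completion depending on flags.
 * Consumers reach this via ib_req_notify_cq(), e.g.
 * ib_req_notify_cq(cq, IB_CQ_NEXT_COMP).
 */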
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
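/*
 * Remove all CQEs that belong to QP qpn, returning any associated SRQ
 * WQEs.  Must be called with cq->lock held; mlx4_ib_cq_clean() below is
 * the locking wrapper.
 */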
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}