/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
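
/*
 * Return the CQE at index n if it is in software ownership, else NULL.
 * Hardware flips the sense of the ownership bit on each pass through
 * the ring, so an entry belongs to software when its ownership bit
 * matches the parity of the current pass.
 */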
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
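
/*
 * Tune event moderation: raise a completion event only after cq_count
 * completions or cq_period microseconds, whichever comes first.
 */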
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
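
/*
 * Allocate a kernel-owned CQE buffer and describe it to the device
 * with an MTT so that the HCA can DMA completions into it.
 */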
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}
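
/*
 * Pin a userspace CQE buffer and build an MTT for it, so that a CQ
 * created by userspace can be written directly by the hardware.
 */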
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
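
/*
 * Create a CQ.  For a userspace CQ the CQE buffer and doorbell record
 * live in the caller's address space; for a kernel CQ both are
 * allocated here.  The ring is rounded up to a power of two with one
 * entry reserved, so ibcq.cqe reports entries - 1.
 */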
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
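
/*
 * The two helpers below stage a replacement CQE buffer for a resize
 * operation: one for kernel CQs, one (via pinned user memory) for
 * userspace CQs.  The new buffer hangs off cq->resize_buf until the
 * firmware resize completes.
 */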
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
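
/* Count the CQEs that software has not yet consumed. */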
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}
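
/*
 * Copy every CQE still outstanding in the old buffer into the new
 * (resized) buffer, fixing up each entry's ownership bit for the new
 * ring size, until the special RESIZE CQE is reached.
 */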
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}
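
/*
 * Resize a CQ.  A replacement buffer is staged first, then the
 * firmware is told to switch over; for kernel CQs any CQEs still
 * pending in the old buffer are copied across under the CQ lock.
 */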
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}
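
/*
 * Destroy a CQ, returning the buffer and doorbell record either to
 * userspace (unmapping and unpinning) or to the kernel allocators.
 */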
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}
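
/*
 * A received packet's checksum can be trusted only if it is a
 * non-fragmented IPv4 packet without IP options, the hardware set the
 * IP-OK flag, and the TCP/UDP checksum field equals 0xffff.
 */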
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
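
/*
 * Translate one CQE into an ib_wc work completion.  Returns 0 on
 * success, -EAGAIN if no CQE is ready, or -EINVAL on a malformed
 * entry.  *cur_qp caches the QP of the previous CQE so back-to-back
 * completions for the same QP skip the QP table lookup.
 */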
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode    = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IB_WC_RDMA_READ;
			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode    = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode    = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode    = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode    = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
		if (rdma_port_get_link_layer(wc->qp->device,
					     (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}
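
/*
 * Poll up to num_entries completions while holding the CQ lock, then
 * ring the consumer-index doorbell once for the whole batch.
 */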
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}
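
/*
 * Arm the CQ for the next completion event: solicited-only or any
 * completion, depending on the notify flags from the consumer.
 */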
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
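
/*
 * Remove all CQEs that belong to the given QP, compacting the ring by
 * copying the surviving older entries forward.  Callers hold the CQ
 * lock (mlx4_ib_cq_clean() below takes it); the QP is already in
 * RESET, so hardware adds no new entries for it meanwhile.
 */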
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	u32 prod_index;
	u8 owner_bit;
	int nfreed = 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}