/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                pr_warn("Unexpected event type %d "
                        "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device     = ibcq->device;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}
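
/*
 * CQEs are owned alternately by hardware and software; the owner bit
 * flips each time the (power-of-two sized) CQ wraps.  get_sw_cqe()
 * below returns the entry at index 'n' only if its owner bit matches
 * the current pass, i.e. only if hardware has already completed it.
 * With 64-byte stride CQEs the owner byte lives in the second half of
 * the entry, hence the 'tcqe' adjustment.
 */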
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

        return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
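
/*
 * Kernel-owned CQ buffers are set up in three steps: allocate the
 * buffer itself, initialize an MTT for it, and write the buffer's
 * pages into that MTT so the HCA can address it.  Each step has a
 * matching unwind label on failure.
 */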
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
                             PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
        if (err)
                goto out;

        buf->entry_size = dev->dev->caps.cqe_size;
        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                            &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
        return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
        int err;
        int cqe_size = dev->dev->caps.cqe_size;

        *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
                            ilog2((*umem)->page_size), &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}
#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
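/*
 * CQ creation takes two paths.  For userspace CQs the buffer and the
 * doorbell record come from the caller (via udata) and are pinned with
 * ib_umem_get(); for kernel CQs the driver allocates both itself.  In
 * either case the firmware CQ is then set up with mlx4_cq_alloc(),
 * optionally requesting completion timestamping when
 * IB_CQ_FLAGS_TIMESTAMP_COMPLETION was passed in attr->flags.
 */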
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
                                const struct ib_cq_init_attr *attr,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
        int err;

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return ERR_PTR(-EINVAL);

        if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
                return ERR_PTR(-EINVAL);

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        cq->create_flags = attr->flags;
        INIT_LIST_HEAD(&cq->send_qp_list);
        INIT_LIST_HEAD(&cq->recv_qp_list);

        if (context) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
                if (err)
                        goto err_mtt;

                uar = &to_mucontext(context)->uar;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
                if (err)
                        goto err_cq;

                cq->mcq.set_ci_db  = cq->db.db;
                cq->mcq.arm_db     = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                uar = &dev->priv_uar;
        }

        if (dev->eq_table)
                vector = dev->eq_table[vector % ibdev->num_comp_vectors];

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                            cq->db.dma, &cq->mcq, vector, 0,
                            !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
        if (err)
                goto err_dbmap;

        if (context)
                cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
        else
                cq->mcq.comp = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_dbmap;
                }

        return &cq->ibcq;

err_dbmap:
        if (context)
                mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        if (context)
                ib_umem_release(cq->umem);
        else
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!context)
                mlx4_db_free(dev->dev, &cq->db);

err_cq:
        kfree(cq);

        return ERR_PTR(err);
}
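
/*
 * Resize uses a staging buffer: cq->resize_buf (and cq->resize_umem for
 * userspace CQs) holds the new buffer until the firmware has switched
 * over.  The two helpers below only allocate that staging state; the
 * actual switch-over happens in mlx4_ib_resize_cq() and, for kernel
 * CQs, when the RESIZE CQE is seen by the poll path.
 */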
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                                  &cq->resize_umem, ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i))
                ++i;

        return i - cq->mcq.cons_index;
}
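
/*
 * Copy the CQEs that software has not yet polled from the old buffer
 * into the resize buffer, starting at the current consumer index and
 * stopping at the special RESIZE-opcode CQE written by the hardware.
 * The owner bit of each copied entry is recomputed for its position in
 * the new, differently sized ring.
 */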
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe, *new_cqe;
        int i;
        int cqe_size = cq->buf.entry_size;
        int cqe_inc = cqe_size == 64 ? 1 : 0;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        cqe += cqe_inc;

        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                           (i + 1) & cq->resize_buf->cqe);
                memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
                new_cqe += cqe_inc;

                new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
                        (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
                cqe += cqe_inc;
        }
        ++cq->mcq.cons_index;
}
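
/*
 * mlx4_ib_resize_cq(): round the requested size up to a power of two,
 * allocate the staging buffer, and ask the firmware to resize with
 * mlx4_cq_resize().  For userspace CQs the new buffer is installed
 * immediately; for kernel CQs it is installed under cq->lock here once
 * the outstanding entries have been copied over, unless the poll path
 * already performed the switch when it hit the RESIZE CQE.
 */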
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_mtt mtt;
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);
        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = -EINVAL;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        mtt = cq->buf.mtt;

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        mlx4_mtt_cleanup(dev->dev, &mtt);
        if (ibcq->uobject) {
                cq->buf      = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem     = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
                struct mlx4_ib_cq_buf tmp_buf;
                int tmp_cqe = 0;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        tmp_buf = cq->buf;
                        tmp_cqe = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);

                if (tmp_cqe)
                        mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
        }

        goto out;

err_buf:
        mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
        }

out:
        mutex_unlock(&cq->resize_mutex);

        return err;
}
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (cq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }

        kfree(mcq);

        return 0;
}
static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
                 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
                 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
                 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
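
/*
 * Translate a hardware error CQE syndrome into the corresponding
 * ib_wc_status value and propagate the vendor syndrome.  Local QP
 * operation errors also get the raw CQE dumped at debug level.
 */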
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                pr_debug("local QP operation err "
                         "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                         "opcode = %02x)\n",
                         be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                         cqe->vendor_err_syndrome,
                         cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
        return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4      |
                                      MLX4_CQE_STATUS_IPV4F     |
                                      MLX4_CQE_STATUS_IPV4OPT   |
                                      MLX4_CQE_STATUS_IPV6      |
                                      MLX4_CQE_STATUS_IPOK)) ==
                cpu_to_be16(MLX4_CQE_STATUS_IPV4        |
                            MLX4_CQE_STATUS_IPOK)) &&
                (status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
                                      MLX4_CQE_STATUS_TCP)) &&
                checksum == cpu_to_be16(0xffff);
}
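
/*
 * For multi-function (SR-IOV) devices, completions on proxy QP0/QP1 QPs
 * carry a tunnel header ahead of the payload.  use_tunnel_data() pulls
 * the real pkey index, source QP, GRH flag and (for Ethernet) VLAN and
 * source MAC out of that header instead of the CQE itself.
 */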
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
                           unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
        struct mlx4_ib_proxy_sqp_hdr *hdr;

        ib_dma_sync_single_for_cpu(qp->ibqp.device,
                                   qp->sqp_proxy_rcv[tail].map,
                                   sizeof (struct mlx4_ib_proxy_sqp_hdr),
                                   DMA_FROM_DEVICE);
        hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
        wc->pkey_index  = be16_to_cpu(hdr->tun.pkey_index);
        wc->src_qp      = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
        wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
        wc->dlid_path_bits = 0;

        if (is_eth) {
                wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
                memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
                memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
                wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
        } else {
                wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
                wc->sl   = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        }

        return 0;
}
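
/*
 * When the device is in an internal error state the hardware no longer
 * generates completions, so the two helpers below walk the QPs attached
 * to the CQ and synthesize IB_WC_WR_FLUSH_ERR completions for every
 * outstanding work request, send queues first, then receive queues.
 */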
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
                               struct ib_wc *wc, int *npolled, int is_send)
{
        struct mlx4_ib_wq *wq;
        unsigned cur;
        int i;

        wq = is_send ? &qp->sq : &qp->rq;
        cur = wq->head - wq->tail;

        if (cur == 0)
                return;

        for (i = 0; i < cur && *npolled < num_entries; i++) {
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                (*npolled)++;
                wc->qp = &qp->ibqp;
                wc++;
        }
}
static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
                                 struct ib_wc *wc, int *npolled)
{
        struct mlx4_ib_qp *qp;

        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return
         * simulated FLUSH_ERR completions
         */
        list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
                if (*npolled >= num_entries)
                        goto out;
        }

        list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
                mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
                if (*npolled >= num_entries)
                        goto out;
        }

out:
        return;
}
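
/*
 * mlx4_ib_poll_one() consumes a single CQE: check the owner bit, advance
 * the consumer index, resolve the QP (and possibly the XRC SRQ) the entry
 * belongs to, pop the matching work request id from the send, receive or
 * SRQ ring, and fill in the ib_wc fields according to the opcode.
 * Returns -EAGAIN when no software-owned CQE is available.
 */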
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        struct mlx4_srq *msrq = NULL;
        int is_send;
        int is_error;
        int is_eth;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;
        unsigned tail = 0;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        if (cq->buf.entry_size == 64)
                cqe++;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                pr_warn("Completion for NOP opcode detected!\n");
                return -EINVAL;
        }

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
                if (unlikely(!mqp)) {
                        pr_warn("CQ %06x with entry for unknown QPN %06x\n",
                                cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
                u32 srq_num;

                g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
                srq_num       = g_mlpath_rqpn & 0xffffff;
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
                if (unlikely(!msrq)) {
                        pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
                                cq->mcq.cqn, srq_num);
                        return -EINVAL;
                }
        }

        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else if (msrq) {
                srq = to_mibsrq(msrq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq        = &(*cur_qp)->rq;
                tail      = wq->tail & (wq->wqe_cnt - 1);
                wc->wr_id = wq->wrid[tail];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_SEND:
                case MLX4_OPCODE_SEND_INVAL:
                        wc->opcode = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode = IB_WC_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode = IB_WC_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_CS:
                        wc->opcode = IB_WC_MASKED_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_MASKED_ATOMIC_FA:
                        wc->opcode = IB_WC_MASKED_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode = IB_WC_LSO;
                        break;
                case MLX4_OPCODE_FMR:
                        wc->opcode = IB_WC_REG_MR;
                        break;
                case MLX4_OPCODE_LOCAL_INVAL:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND_INVAL:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
                        wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode      = IB_WC_RECV;
                        wc->wc_flags    = IB_WC_WITH_IMM;
                        wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }

                is_eth = (rdma_port_get_link_layer(wc->qp->device,
                                                   (*cur_qp)->port) ==
                          IB_LINK_LAYER_ETHERNET);
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
                             MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
                                return use_tunnel_data(*cur_qp, cq, wc, tail,
                                                       cqe, is_eth);
                }

                wc->slid           = be16_to_cpu(cqe->rlid);
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->wc_flags      |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                        cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
                if (is_eth) {
                        wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
                            MLX4_CQE_CVLAN_PRESENT_MASK) {
                                wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
                                        MLX4_CQE_VID_MASK;
                        } else {
                                wc->vlan_id = 0xffff;
                        }
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
                        wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
        }

        return 0;
}
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
                goto out;
        }

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        mlx4_cq_set_ci(&cq->mcq);

out:
        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}
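
/*
 * Arm the CQ for the next completion event by ringing its doorbell,
 * requesting either solicited-only or any-completion notification as
 * asked for in 'flags'.
 */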
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}
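
/*
 * __mlx4_ib_cq_clean() removes all CQEs belonging to a QP that is being
 * destroyed (freeing any SRQ WQEs they reference) and compacts the
 * remaining entries so the ring stays contiguous.  The caller must hold
 * cq->lock; mlx4_ib_cq_clean() below is the locking wrapper.
 */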
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;
        int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe += cqe_inc;

                if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest += cqe_inc;

                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}