/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *    0                   cq empty
 *    1                   cqe returned
 *    -EAGAIN             caller must try again
 *    any other -errno    fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                            struct ib_wc *wc)
{
    struct iwch_qp *qhp = NULL;
    struct t3_cqe cqe, *rd_cqe;
    struct t3_wq *wq;
    u32 credit = 0;
    u8 cqe_flushed;
    u64 cookie;
    int ret = 1;
    rd_cqe = cxio_next_cqe(&chp->cq);
    if (!rd_cqe)
        return 0;
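    /*
     * Look up the QP this CQE belongs to and hold its lock while the
     * CQE is consumed, so the WQ state stays consistent underneath us.
     */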
    qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
    if (!qhp)
        wq = NULL;
    else {
        spin_lock(&qhp->lock);
        wq = &(qhp->wq);
    }
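    /* Pull the CQE out of the hardware CQ via the cxio HAL. */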
    ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
                       &credit);
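    /* T3A devices need CQ credits pushed back to the HAL explicitly. */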
    if (t3a_device(chp->rhp) && credit) {
        pr_debug("%s updating %d cq credits on id %d\n", __func__,
                 credit, chp->cq.cqid);
        cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
    }
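    /*
     * A non-zero return here means this CQE produced no work
     * completion; report -EAGAIN so the caller polls again.
     */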
    if (ret) {
        ret = -EAGAIN;
        goto out;
    }
    ret = 1;

    wc->wr_id = cookie;
    wc->qp = &qhp->ibqp;
    wc->vendor_err = CQE_STATUS(cqe);
    wc->wc_flags = 0;
    pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
             __func__,
             CQE_QPID(cqe), CQE_TYPE(cqe),
             CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
             CQE_WRID_LOW(cqe), (unsigned long long)cookie);
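    /* CQE type 0 is a receive (RQ) completion; anything else is from the SQ. */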
    if (CQE_TYPE(cqe) == 0) {
        if (!CQE_STATUS(cqe))
            wc->byte_len = CQE_LEN(cqe);
        else
            wc->byte_len = 0;
        wc->opcode = IB_WC_RECV;
        if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
            CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
            wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
            wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }
    } else {
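        /* SQ completion: map the T3 opcode onto the ib_wc opcode. */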
        switch (CQE_OPCODE(cqe)) {
        case T3_RDMA_WRITE:
            wc->opcode = IB_WC_RDMA_WRITE;
            break;
        case T3_READ_REQ:
            wc->opcode = IB_WC_RDMA_READ;
            wc->byte_len = CQE_LEN(cqe);
            break;
        case T3_SEND:
        case T3_SEND_WITH_SE:
        case T3_SEND_WITH_INV:
        case T3_SEND_WITH_SE_INV:
            wc->opcode = IB_WC_SEND;
            break;
        case T3_LOCAL_INV:
            wc->opcode = IB_WC_LOCAL_INV;
            break;
        case T3_FAST_REGISTER:
            wc->opcode = IB_WC_REG_MR;
            break;
        default:
            pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                   CQE_OPCODE(cqe), CQE_QPID(cqe));
            ret = -EINVAL;
            goto out;
        }
    }
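    /* A flushed WR always completes with flush status, whatever the HW said. */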
    if (cqe_flushed)
        wc->status = IB_WC_WR_FLUSH_ERR;
    else {
        switch (CQE_STATUS(cqe)) {
        case TPT_ERR_SUCCESS:
            wc->status = IB_WC_SUCCESS;
            break;
        case TPT_ERR_STAG:
            wc->status = IB_WC_LOC_ACCESS_ERR;
            break;
        case TPT_ERR_PDID:
            wc->status = IB_WC_LOC_PROT_ERR;
            break;
        case TPT_ERR_QPID:
        case TPT_ERR_ACCESS:
            wc->status = IB_WC_LOC_ACCESS_ERR;
            break;
        case TPT_ERR_WRAP:
            wc->status = IB_WC_GENERAL_ERR;
            break;
        case TPT_ERR_BOUND:
            wc->status = IB_WC_LOC_LEN_ERR;
            break;
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
            wc->status = IB_WC_MW_BIND_ERR;
            break;
        case TPT_ERR_CRC:
        case TPT_ERR_MARKER:
        case TPT_ERR_PDU_LEN_ERR:
        case TPT_ERR_OUT_OF_RQE:
        case TPT_ERR_DDP_VERSION:
        case TPT_ERR_RDMA_VERSION:
        case TPT_ERR_DDP_QUEUE_NUM:
        case TPT_ERR_MSN:
        case TPT_ERR_TBIT:
        case TPT_ERR_MO:
        case TPT_ERR_MSN_RANGE:
        case TPT_ERR_IRD_OVERFLOW:
        case TPT_ERR_OPCODE:
            wc->status = IB_WC_FATAL_ERR;
            break;
        case TPT_ERR_SWFLUSH:
            wc->status = IB_WC_WR_FLUSH_ERR;
            break;
        default:
            pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                   CQE_STATUS(cqe), CQE_QPID(cqe));
            ret = -EINVAL;
        }
    }
out:
    if (wq)
        spin_unlock(&qhp->lock);
    return ret;
}
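/*
 * iwch_poll_cq - this provider's ib_poll_cq entry point. Fills up to
 * num_entries work completions under the CQ lock and returns how many
 * were polled, or a negative errno on a fatal error.
 */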
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
    struct iwch_dev *rhp;
    struct iwch_cq *chp;
    unsigned long flags;
    int npolled;
    int err = 0;

    chp = to_iwch_cq(ibcq);
    rhp = chp->rhp;

    spin_lock_irqsave(&chp->lock, flags);
    for (npolled = 0; npolled < num_entries; ++npolled) {

        do {
            /*
             * Because T3 can post CQEs that are _not_ associated
             * with a WR, we might have to poll again after removing
             * one of these.
             */
            err = iwch_poll_cq_one(rhp, chp, wc + npolled);
        } while (err == -EAGAIN);
        if (err <= 0)
            break;
    }
    spin_unlock_irqrestore(&chp->lock, flags);

    if (err < 0)
        return err;
    return npolled;
}
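/*
 * Example (illustrative only, not part of this file): consumers reach
 * iwch_poll_cq() through the generic verbs layer rather than calling it
 * directly. A minimal sketch, assuming a ULP that has already created
 * an ib_cq *cq, with handle_completion() as a hypothetical helper:
 *
 *    struct ib_wc wc[8];
 *    int i, n;
 *
 *    while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *        for (i = 0; i < n; i++)
 *            handle_completion(&wc[i]);
 *    if (n < 0)
 *        pr_err("poll_cq failed: %d\n", n);
 */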