/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *      0                       cq empty
 *      1                       cqe returned
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                            struct ib_wc *wc)
{
        struct iwch_qp *qhp = NULL;
        struct t3_cqe cqe, *rd_cqe;
        struct t3_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie;
        int ret = 1;

        rd_cqe = cxio_next_cqe(&chp->cq);

        if (!rd_cqe)
                return 0;

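        /*
         * Look up the QP this CQE belongs to.  If the QP is already gone,
         * poll with a NULL WQ so the CQE is still consumed from the CQ.
         */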
        qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
                           &credit);
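        /*
         * On T3A devices, return any CQ credits reported by cxio_poll_cq()
         * back to the hardware explicitly.
         */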
        if (t3a_device(chp->rhp) && credit) {
                PDBG("%s updating %d cq credits on id %d\n", __func__,
                     credit, chp->cq.cqid);
                cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
        }

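        /*
         * A non-zero return from cxio_poll_cq() means the CQE was consumed
         * without producing a completion for the caller; report -EAGAIN so
         * iwch_poll_cq() polls again.
         */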
        if (ret) {
                ret = -EAGAIN;
                goto out;
        }
        ret = 1;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(cqe);
        wc->wc_flags = 0;

86 PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
87 "lo 0x%x cookie 0x%llx\n", __func__
,
88 CQE_QPID(cqe
), CQE_TYPE(cqe
),
89 CQE_OPCODE(cqe
), CQE_STATUS(cqe
), CQE_WRID_HI(cqe
),
90 CQE_WRID_LOW(cqe
), (unsigned long long) cookie
);
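        /*
         * A type-0 CQE is a receive (RQ) completion; anything else completed
         * on the SQ and is mapped by opcode below.
         */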
        if (CQE_TYPE(cqe) == 0) {
                if (!CQE_STATUS(cqe))
                        wc->byte_len = CQE_LEN(cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
                    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
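                /* SQ completion: translate the T3 opcode to an ib_wc opcode. */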
                switch (CQE_OPCODE(cqe)) {
                case T3_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case T3_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(cqe);
                        break;
                case T3_SEND:
                case T3_SEND_WITH_SE:
                case T3_SEND_WITH_INV:
                case T3_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        break;
                case T3_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case T3_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

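        /*
         * CQEs completed while the QP was being flushed are reported as
         * flushed; otherwise map the hardware status to an ib_wc status.
         */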
        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
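                /* Map the T3 TPT error code to the closest ib_wc status. */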
                switch (CQE_STATUS(cqe)) {
                case TPT_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case TPT_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case TPT_ERR_QPID:
                case TPT_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case TPT_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case TPT_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case TPT_ERR_INVALIDATE_SHARED_MR:
                case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case TPT_ERR_CRC:
                case TPT_ERR_MARKER:
                case TPT_ERR_PDU_LEN_ERR:
                case TPT_ERR_OUT_OF_RQE:
                case TPT_ERR_DDP_VERSION:
                case TPT_ERR_RDMA_VERSION:
                case TPT_ERR_DDP_QUEUE_NUM:
                case TPT_ERR_MSN:
                case TPT_ERR_TBIT:
                case TPT_ERR_MO:
                case TPT_ERR_MSN_RANGE:
                case TPT_ERR_IRD_OVERFLOW:
                case TPT_ERR_OPCODE:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case TPT_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
                               "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

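/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of entries polled (possibly 0), or a negative errno.
 */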
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_iwch_cq(ibcq);
        rhp = chp->rhp;

        spin_lock_irqsave(&chp->lock, flags);
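        /* chp->lock serializes access to the CQ while we consume CQEs. */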
        for (npolled = 0; npolled < num_entries; ++npolled) {

                /*
                 * Because T3 can post CQEs that are _not_ associated
                 * with a WR, we might have to poll again after removing
                 * one of these.
                 */
                do {
                        err = iwch_poll_cq_one(rhp, chp, wc + npolled);
                } while (err == -EAGAIN);
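                /* 0 means the CQ is empty, < 0 is a fatal error; stop either way. */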
                if (err <= 0)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);

        if (err < 0)
                return err;
        else
                return npolled;
}
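
#if 0
/*
 * Illustrative sketch only (not part of the driver): how a kernel consumer
 * might drain completions through the generic ib_poll_cq() verb, which for
 * iw_cxgb3 CQs dispatches to iwch_poll_cq() above.  The helper name and
 * batch size here are hypothetical.
 */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[8];
        int n, i;

        /* ib_poll_cq() returns the number of completions polled, or < 0. */
        while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS) {
                                pr_debug("wr_id %llu completed with status %d\n",
                                         (unsigned long long)wc[i].wr_id,
                                         wc[i].status);
                                continue;
                        }
                        /* Successful WR identified by wc[i].wr_id. */
                }
        }
}
#endif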