/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
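
/*
 * Build a T3 SEND WR from an ib_send_wr: choose the rdmaop variant from
 * the IB opcode and solicited-event flag, carry either the 32-bit
 * immediate or the SGL in the WR, and return the WR length in 8-byte
 * flits via *flit_cnt.
 */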
static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
#if 0				/* Not currently supported */
	case TYPE_SEND_INVALIDATE:
	case TYPE_SEND_INVALIDATE_IMMEDIATE:
		wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
		break;
	case TYPE_SEND_SE_INVALIDATE:
		wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
		break;
#endif
	default:
		break;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		plen = 4;
		wqe->send.sgl[0].stag = wr->imm_data;
		wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
		wqe->send.num_sgle = __constant_cpu_to_be32(0);
		*flit_cnt = 5;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->send.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->send.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 4 + ((wr->num_sge) << 1);
	}
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
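
/*
 * Build a T3 RDMA WRITE WR.  The sink STag and offset come from the
 * WR's rdma fields; with IB_WR_RDMA_WRITE_WITH_IMM the immediate is
 * carried in sgl[0].stag and the SGL count is zero.
 */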
static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
				 u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->imm_data;
		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
		wqe->write.num_sgle = __constant_cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
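
/*
 * Build a T3 RDMA READ REQUEST WR.  T3 takes a single local SGE, so
 * multi-SGE reads are rejected here.
 */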
static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.reserved[2] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}
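
/*
 * Validate each SGE against its MR (ownership state, ZBVA, VA/length
 * bounds) and translate it to an index into the adapter's PBL plus the
 * MR page size.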
 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
 */
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u32 offset;

	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}

		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += ((u32) mhp->attr.va_fbo) %
		          (1UL << (12 + mhp->attr.page_size));
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			        rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}
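
/*
 * Build a T3 RECV WR: copy the posted SGL and zero out the remaining
 * T3_MAX_SGE entries so the WR is fully initialized.
 */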
static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
	}
	return 0;
}
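
/*
 * Post a chain of send WRs under the QP lock.  Each WR is built in the
 * next software queue slot and mirrored in the software send queue for
 * completion processing; the doorbell is rung once after the loop.
 */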
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 t3_wr_flit_cnt;
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_FENCE)
			t3_wr_flags |= T3_READ_FENCE_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			t3_wr_opcode = T3_WR_SEND;
			err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		++(qhp->wq.wptr);
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}
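
/*
 * Post a chain of receive WRs.  The free-slot count keeps one RQ entry
 * in reserve; WRs past it fail with -ENOMEM and are returned in
 * *bad_wr.
 */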
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	while (wr) {
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs)
			err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}
		qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
			wr->wr_id;
		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
		     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}
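
/*
 * Post a memory-window bind as a T3_WR_BIND WR on the send queue.  The
 * underlying MR is validated and translated to a PBL address by
 * iwch_sgl2pbl_map() before the WR is built.
 */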
int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if ((num_wrs) <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = T3_VA_BASED_TO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}
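
/*
 * Map a CQE error status (and the opcode it arrived with) onto the
 * RDMAP/DDP/MPA layer and error code carried in a TERMINATE message.
 */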
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
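
/*
 * Send a TERMINATE message to the peer; the termination codes come
 * from build_term_codes() on the error CQE, if any.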
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __FUNCTION__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));

	skb->priority = CPL_PRIORITY_DATA;
	return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
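
/*
 * Flush the QP to completion: drain both hardware CQs, generate flush
 * completions for any outstanding RQ/SQ entries, and invoke the CQ
 * completion handlers.  CQ locks are taken before the QP lock, so the
 * QP lock is dropped and re-acquired around each flush.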
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}
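
/*
 * For user QPs just mark the WQ in error and let the user process clean
 * up; kernel QPs are flushed here directly.
 */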
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (qhp->ibqp.uobject)
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}

/*
 * Return nonzero if at least one RECV was pre-posted.
 */
static int rqes_posted(struct iwch_qp *qhp)
{
	return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
}
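
/*
 * Populate a t3_rdma_init_attr from the QP and endpoint attributes and
 * hand it to the HAL to move the hardware QP into RDMA mode.
 */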
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	/*
	 * XXX - The IWCM doesn't quite handle getting these
	 * attrs set before going into RTS.  For now, just turn
	 * them on always...
	 */
#if 0
	init_attr.qpcaps = qhp->attr.enableRdmaRead |
		(qhp->attr.enableRdmaWrite << 1) |
		(qhp->attr.enableBind << 2) |
		(qhp->attr.enable_stag0_fastreg << 3) |
		(qhp->attr.enable_stag0_fastreg << 4);
#else
	init_attr.qpcaps = 0x1f;
#endif
	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __FUNCTION__, ret);
	return ret;
}
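
/*
 * The QP state machine.  Attribute changes are only accepted in IDLE;
 * state transitions perform their side effects (rdma_init on IDLE->RTS,
 * flush/terminate/disconnect on the way down), with operations that can
 * sleep done after the QP lock is dropped.
 */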
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
			}
			flush_qp(qhp, &flag);
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}

		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		memset(&qhp->attr, 0, sizeof(qhp->attr));
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __FUNCTION__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect)
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
	return ret;
}
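
/*
 * Quiesce/resume pause and restart a QP's TID at the LLP layer, with
 * the state tracked in qhp->flags.
 */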
static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}
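
/*
 * Walk all QPs on the device and quiesce (or resume) those whose send
 * or receive CQ matches the given CQ.
 */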
int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}