2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/pci.h>
37 #include <linux/timer.h>
38 #include "firmware_exports.h"
#define T3_MAX_INLINE	64
#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
#define T3_STAG0_PAGE_SHIFT 15

/*
 * Ring-pointer arithmetic for the work/completion queues.  Pointers are
 * free-running u32 counters; the ring size is 1 << size_log2, so the
 * difference (wptr - rptr) is the number of outstanding entries and the
 * low size_log2 bits of a pointer give its ring index.
 *
 * NOTE(review): Q_FULL previously ended in a dangling "&& \" line
 * continuation (second operand lost); restored: full means the pointer
 * difference has reached the ring size and the pointers differ.
 */
#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
				       ((wptr) != (rptr)) )
/* Generation bit flips each time a pointer wraps the ring. */
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
54 static inline void ring_doorbell(void __iomem
*doorbell
, u32 qpid
)
56 writel(((1<<31) | qpid
), doorbell
);
/* Serial-number compare on 32-bit sequence values: true iff x >= y
 * modulo 2^32, i.e. the unsigned difference (x - y) has the sign bit
 * clear.  Tolerates wraparound of the sequence space. */
#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
62 T3_COMPLETION_FLAG
= 0x01,
63 T3_NOTIFY_FLAG
= 0x02,
64 T3_SOLICITED_EVENT_FLAG
= 0x04,
65 T3_READ_FENCE_FLAG
= 0x08,
66 T3_LOCAL_FENCE_FLAG
= 0x10
67 } __attribute__ ((packed
));
70 T3_WR_BP
= FW_WROPCODE_RI_BYPASS
,
71 T3_WR_SEND
= FW_WROPCODE_RI_SEND
,
72 T3_WR_WRITE
= FW_WROPCODE_RI_RDMA_WRITE
,
73 T3_WR_READ
= FW_WROPCODE_RI_RDMA_READ
,
74 T3_WR_INV_STAG
= FW_WROPCODE_RI_LOCAL_INV
,
75 T3_WR_BIND
= FW_WROPCODE_RI_BIND_MW
,
76 T3_WR_RCV
= FW_WROPCODE_RI_RECEIVE
,
77 T3_WR_INIT
= FW_WROPCODE_RI_RDMA_INIT
,
78 T3_WR_QP_MOD
= FW_WROPCODE_RI_MODIFY_QP
,
79 T3_WR_FASTREG
= FW_WROPCODE_RI_FASTREGISTER_MR
80 } __attribute__ ((packed
));
83 T3_RDMA_WRITE
, /* IETF RDMAP v1.0 ... */
91 T3_RDMA_INIT
, /* CHELSIO RI specific ... */
97 T3_RDMA_READ_REQ_WITH_INV
,
98 } __attribute__ ((packed
));
100 static inline enum t3_rdma_opcode
wr2opcode(enum t3_wr_opcode wrop
)
103 case T3_WR_BP
: return T3_BYPASS
;
104 case T3_WR_SEND
: return T3_SEND
;
105 case T3_WR_WRITE
: return T3_RDMA_WRITE
;
106 case T3_WR_READ
: return T3_READ_REQ
;
107 case T3_WR_INV_STAG
: return T3_LOCAL_INV
;
108 case T3_WR_BIND
: return T3_BIND_MW
;
109 case T3_WR_INIT
: return T3_RDMA_INIT
;
110 case T3_WR_QP_MOD
: return T3_QP_MOD
;
111 case T3_WR_FASTREG
: return T3_FAST_REGISTER
;
118 /* Work request id */
127 #define WRID(wrid) (wrid.id1)
128 #define WRID_GEN(wrid) (wrid.id0.wr_gen)
129 #define WRID_IDX(wrid) (wrid.id0.wr_idx)
130 #define WRID_LO(wrid) (wrid.id0.wr_lo)
133 __be32 op_seop_flags
;
/* Field layout of fw_riwrh.op_seop_flags (host order after be32_to_cpu):
 * OP in bits [31:24], SOPEOP in bits [23:22], FLAGS in bits [29:8]
 * (22-bit mask at shift 8).  S_* = shift, M_* = mask,
 * V_*(x) = place value into field, G_*(x) = extract field value. */
#define S_FW_RIWR_OP		24
#define M_FW_RIWR_OP		0xff
#define V_FW_RIWR_OP(x)		((x) << S_FW_RIWR_OP)
#define G_FW_RIWR_OP(x)	((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)

#define S_FW_RIWR_SOPEOP	22
#define M_FW_RIWR_SOPEOP	0x3
#define V_FW_RIWR_SOPEOP(x)	((x) << S_FW_RIWR_SOPEOP)

#define S_FW_RIWR_FLAGS		8
#define M_FW_RIWR_FLAGS		0x3fffff
#define V_FW_RIWR_FLAGS(x)	((x) << S_FW_RIWR_FLAGS)
#define G_FW_RIWR_FLAGS(x)	((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)

/* Fields of fw_riwrh.gen_tid_len: GEN in bit 31, TID at shift 8, LEN at
 * shift 0 (TID/LEN widths not shown in this view — no masks defined). */
#define S_FW_RIWR_TID		8
#define V_FW_RIWR_TID(x)	((x) << S_FW_RIWR_TID)

#define S_FW_RIWR_LEN		0
#define V_FW_RIWR_LEN(x)	((x) << S_FW_RIWR_LEN)

#define S_FW_RIWR_GEN		31
#define V_FW_RIWR_GEN(x)	((x) << S_FW_RIWR_GEN)
166 /* If num_sgle is zero, flit 5+ contains immediate data.*/
168 struct fw_riwrh wrh
; /* 0 */
169 union t3_wrid wrid
; /* 1 */
176 struct t3_sge sgl
[T3_MAX_SGE
]; /* 4+ */
179 #define T3_MAX_FASTREG_DEPTH 10
180 #define T3_MAX_FASTREG_FRAG 10
182 struct t3_fastreg_wr
{
183 struct fw_riwrh wrh
; /* 0 */
184 union t3_wrid wrid
; /* 1 */
187 __be32 va_base_hi
; /* 3 */
188 __be32 va_base_lo_fbo
;
189 __be32 page_type_perms
; /* 4 */
191 __be64 pbl_addrs
[0]; /* 5+ */
195 * If a fastreg wr spans multiple wqes, then the 2nd fragment look like this.
198 struct fw_riwrh wrh
; /* 0 */
199 __be64 pbl_addrs
[14]; /* 1..14 */
202 #define S_FR_PAGE_COUNT 24
203 #define M_FR_PAGE_COUNT 0xff
204 #define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
205 #define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
207 #define S_FR_PAGE_SIZE 16
208 #define M_FR_PAGE_SIZE 0x1f
209 #define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
210 #define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
213 #define M_FR_TYPE 0x1
214 #define V_FR_TYPE(x) ((x) << S_FR_TYPE)
215 #define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
218 #define M_FR_PERMS 0xff
219 #define V_FR_PERMS(x) ((x) << S_FR_PERMS)
220 #define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
222 struct t3_local_inv_wr
{
223 struct fw_riwrh wrh
; /* 0 */
224 union t3_wrid wrid
; /* 1 */
229 struct t3_rdma_write_wr
{
230 struct fw_riwrh wrh
; /* 0 */
231 union t3_wrid wrid
; /* 1 */
235 __be64 to_sink
; /* 3 */
238 struct t3_sge sgl
[T3_MAX_SGE
]; /* 5+ */
241 struct t3_rdma_read_wr
{
242 struct fw_riwrh wrh
; /* 0 */
243 union t3_wrid wrid
; /* 1 */
248 __be64 rem_to
; /* 3 */
249 __be32 local_stag
; /* 4 */
251 __be64 local_to
; /* 5 */
254 struct t3_bind_mw_wr
{
255 struct fw_riwrh wrh
; /* 0 */
256 union t3_wrid wrid
; /* 1 */
257 u16 reserved
; /* 2 */
261 __be32 mw_stag
; /* 3 */
263 __be64 mw_va
; /* 4 */
264 __be32 mr_pbl_addr
; /* 5 */
269 struct t3_receive_wr
{
270 struct fw_riwrh wrh
; /* 0 */
271 union t3_wrid wrid
; /* 1 */
272 u8 pagesz
[T3_MAX_SGE
];
273 __be32 num_sgle
; /* 2 */
274 struct t3_sge sgl
[T3_MAX_SGE
]; /* 3+ */
275 __be32 pbl_addr
[T3_MAX_SGE
];
278 struct t3_bypass_wr
{
280 union t3_wrid wrid
; /* 1 */
283 struct t3_modify_qp_wr
{
284 struct fw_riwrh wrh
; /* 0 */
285 union t3_wrid wrid
; /* 1 */
286 __be32 flags
; /* 2 */
287 __be32 quiesce
; /* 2 */
288 __be32 max_ird
; /* 3 */
289 __be32 max_ord
; /* 3 */
290 __be64 sge_cmd
; /* 4 */
295 enum t3_modify_qp_flags
{
296 MODQP_QUIESCE
= 0x01,
297 MODQP_MAX_IRD
= 0x02,
298 MODQP_MAX_ORD
= 0x04,
299 MODQP_WRITE_EC
= 0x08,
300 MODQP_READ_EC
= 0x10,
305 uP_RI_MPA_RX_MARKER_ENABLE
= 0x1,
306 uP_RI_MPA_TX_MARKER_ENABLE
= 0x2,
307 uP_RI_MPA_CRC_ENABLE
= 0x4,
308 uP_RI_MPA_IETF_ENABLE
= 0x8
309 } __attribute__ ((packed
));
312 uP_RI_QP_RDMA_READ_ENABLE
= 0x01,
313 uP_RI_QP_RDMA_WRITE_ENABLE
= 0x02,
314 uP_RI_QP_BIND_ENABLE
= 0x04,
315 uP_RI_QP_FAST_REGISTER_ENABLE
= 0x08,
316 uP_RI_QP_STAG0_ENABLE
= 0x10
317 } __attribute__ ((packed
));
319 enum rdma_init_rtr_types
{
326 #define M_RTR_TYPE 0x3
327 #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
328 #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
332 #define V_CHAN(x) ((x) << S_CHAN)
333 #define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
335 struct t3_rdma_init_attr
{
343 enum t3_mpa_attrs mpaattrs
;
344 enum t3_qp_caps qpcaps
;
350 enum rdma_init_rtr_types rtr_type
;
357 struct t3_rdma_init_wr
{
358 struct fw_riwrh wrh
; /* 0 */
359 union t3_wrid wrid
; /* 1 */
362 __be32 scqid
; /* 3 */
364 __be32 rq_addr
; /* 4 */
369 __be16 flags_rtr_type
;
373 __be64 qp_dma_addr
; /* 7 */
374 __be32 qp_dma_size
; /* 8 */
383 struct t3_wq_in_err
{
388 enum rdma_init_wr_flags
{
389 MPA_INITIATOR
= (1<<0),
394 struct t3_send_wr send
;
395 struct t3_rdma_write_wr write
;
396 struct t3_rdma_read_wr read
;
397 struct t3_receive_wr recv
;
398 struct t3_fastreg_wr fastreg
;
399 struct t3_pbl_frag pbl_frag
;
400 struct t3_local_inv_wr local_inv
;
401 struct t3_bind_mw_wr bind
;
402 struct t3_bypass_wr bypass
;
403 struct t3_rdma_init_wr init
;
404 struct t3_modify_qp_wr qp_mod
;
405 struct t3_genbit genbit
;
406 struct t3_wq_in_err wq_in_err
;
410 #define T3_SQ_CQE_FLIT 13
411 #define T3_SQ_COOKIE_FLIT 14
413 #define T3_RQ_COOKIE_FLIT 13
414 #define T3_RQ_CQE_FLIT 14
416 static inline enum t3_wr_opcode
fw_riwrh_opcode(struct fw_riwrh
*wqe
)
418 return G_FW_RIWR_OP(be32_to_cpu(wqe
->op_seop_flags
));
421 enum t3_wr_hdr_bits
{
424 T3_SOPEOP
= T3_EOP
|T3_SOP
,
427 static inline void build_fw_riwrh(struct fw_riwrh
*wqe
, enum t3_wr_opcode op
,
428 enum t3_wr_flags flags
, u8 genbit
, u32 tid
,
431 wqe
->op_seop_flags
= cpu_to_be32(V_FW_RIWR_OP(op
) |
432 V_FW_RIWR_SOPEOP(sopeop
) |
433 V_FW_RIWR_FLAGS(flags
));
435 wqe
->gen_tid_len
= cpu_to_be32(V_FW_RIWR_GEN(genbit
) |
439 ((union t3_wr
*)wqe
)->genbit
.genbit
= cpu_to_be64(genbit
);
443 * T3 ULP2_TX commands
450 /* T3 MC7 RDMA TPT entry format */
453 TPT_NON_SHARED_MR
= 0x0,
456 TPT_MW_RELAXED_PROTECTION
= 0x3
466 TPT_LOCAL_READ
= 0x8,
467 TPT_LOCAL_WRITE
= 0x4,
468 TPT_REMOTE_READ
= 0x2,
469 TPT_REMOTE_WRITE
= 0x1
473 __be32 valid_stag_pdid
;
474 __be32 flags_pagesize_qpid
;
476 __be32 rsvd_pbl_addr
;
479 __be32 va_low_or_fbo
;
481 __be32 rsvd_bind_cnt_or_pstag
;
482 __be32 rsvd_pbl_size
;
485 #define S_TPT_VALID 31
486 #define V_TPT_VALID(x) ((x) << S_TPT_VALID)
487 #define F_TPT_VALID V_TPT_VALID(1U)
489 #define S_TPT_STAG_KEY 23
490 #define M_TPT_STAG_KEY 0xFF
491 #define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
492 #define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
494 #define S_TPT_STAG_STATE 22
495 #define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
496 #define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
498 #define S_TPT_STAG_TYPE 20
499 #define M_TPT_STAG_TYPE 0x3
500 #define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
501 #define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
504 #define M_TPT_PDID 0xFFFFF
505 #define V_TPT_PDID(x) ((x) << S_TPT_PDID)
506 #define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
508 #define S_TPT_PERM 28
509 #define M_TPT_PERM 0xF
510 #define V_TPT_PERM(x) ((x) << S_TPT_PERM)
511 #define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
513 #define S_TPT_REM_INV_DIS 27
514 #define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
515 #define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
517 #define S_TPT_ADDR_TYPE 26
518 #define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
519 #define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
521 #define S_TPT_MW_BIND_ENABLE 25
522 #define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
523 #define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
525 #define S_TPT_PAGE_SIZE 20
526 #define M_TPT_PAGE_SIZE 0x1F
527 #define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
528 #define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
530 #define S_TPT_PBL_ADDR 0
531 #define M_TPT_PBL_ADDR 0x1FFFFFFF
532 #define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
533 #define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
536 #define M_TPT_QPID 0xFFFFF
537 #define V_TPT_QPID(x) ((x) << S_TPT_QPID)
538 #define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
540 #define S_TPT_PSTAG 0
541 #define M_TPT_PSTAG 0xFFFFFF
542 #define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
543 #define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
545 #define S_TPT_PBL_SIZE 0
546 #define M_TPT_PBL_SIZE 0xFFFFF
547 #define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
548 #define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
569 #define M_CQE_OOO 0x1
570 #define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
571 #define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
573 #define S_CQE_QPID 12
574 #define M_CQE_QPID 0x7FFFF
575 #define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
576 #define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
578 #define S_CQE_SWCQE 11
579 #define M_CQE_SWCQE 0x1
580 #define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
581 #define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
583 #define S_CQE_GENBIT 10
584 #define M_CQE_GENBIT 0x1
585 #define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
586 #define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
588 #define S_CQE_STATUS 5
589 #define M_CQE_STATUS 0x1F
590 #define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
591 #define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
594 #define M_CQE_TYPE 0x1
595 #define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
596 #define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
598 #define S_CQE_OPCODE 0
599 #define M_CQE_OPCODE 0xF
600 #define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
601 #define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
/* Accessors over a struct t3_cqe: decode fields of the big-endian
 * 32-bit header word.  Each takes the CQE by value ((x).header). */
#define SW_CQE(x)         (G_CQE_SWCQE(be32_to_cpu((x).header)))
#define CQE_OOO(x)        (G_CQE_OOO(be32_to_cpu((x).header)))
#define CQE_QPID(x)       (G_CQE_QPID(be32_to_cpu((x).header)))
#define CQE_GENBIT(x)     (G_CQE_GENBIT(be32_to_cpu((x).header)))
#define CQE_TYPE(x)       (G_CQE_TYPE(be32_to_cpu((x).header)))
/* TYPE bit distinguishes SQ (set) from RQ (clear) completions. */
#define SQ_TYPE(x)	  (CQE_TYPE((x)))
#define RQ_TYPE(x)	  (!CQE_TYPE((x)))
#define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x).header)))
#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x).header)))

/* True iff the CQE carries any SEND-variant opcode. */
#define CQE_SEND_OPCODE(x)( \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))

#define CQE_LEN(x)        (be32_to_cpu((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)  (be32_to_cpu((x).u.rcqe.stag))
#define CQE_WRID_MSN(x)   (be32_to_cpu((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x)	((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x)	((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x)		((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x)		((x).u.scqe.wrid_low)
633 #define TPT_ERR_SUCCESS 0x0
634 #define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
635 /* STAG is offlimt, being 0, */
636 /* or STAG_key mismatch */
637 #define TPT_ERR_PDID 0x2 /* PDID mismatch */
638 #define TPT_ERR_QPID 0x3 /* QPID mismatch */
639 #define TPT_ERR_ACCESS 0x4 /* Invalid access right */
640 #define TPT_ERR_WRAP 0x5 /* Wrap error */
641 #define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
642 #define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
643 /* shared memory region */
644 #define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
645 /* shared memory region */
646 #define TPT_ERR_ECC 0x9 /* ECC error detected */
647 #define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
648 /* reading PSTAG for a MW */
650 #define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
652 #define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
653 #define TPT_ERR_CRC 0x10 /* CRC error */
654 #define TPT_ERR_MARKER 0x11 /* Marker error */
655 #define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
656 #define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
657 #define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
658 #define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
659 #define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
660 #define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
661 #define TPT_ERR_MSN 0x18 /* MSN error */
662 #define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
663 #define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
665 #define TPT_ERR_MSN_GAP 0x1B
666 #define TPT_ERR_MSN_RANGE 0x1C
667 #define TPT_ERR_IRD_OVERFLOW 0x1D
668 #define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
670 #define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
689 * A T3 WQ implements both the SQ and RQ.
692 union t3_wr
*queue
; /* DMA accessible memory */
693 dma_addr_t dma_addr
; /* DMA address for HW */
694 DEFINE_DMA_UNMAP_ADDR(mapping
); /* unmap kruft */
695 u32 error
; /* 1 once we go to ERROR */
697 u32 wptr
; /* idx to next available WR slot */
698 u32 size_log2
; /* total wq size */
699 struct t3_swsq
*sq
; /* SW SQ */
700 struct t3_swsq
*oldest_read
; /* tracks oldest pending read */
701 u32 sq_wptr
; /* sq_wptr - sq_rptr == count of */
702 u32 sq_rptr
; /* pending wrs */
703 u32 sq_size_log2
; /* sq size */
704 struct t3_swrq
*rq
; /* SW RQ (holds consumer wr_ids */
705 u32 rq_wptr
; /* rq_wptr - rq_rptr == count of */
706 u32 rq_rptr
; /* pending wrs */
707 struct t3_swrq
*rq_oldest_wr
; /* oldest wr on the SW RQ */
708 u32 rq_size_log2
; /* rq size */
709 u32 rq_addr
; /* rq adapter address */
710 void __iomem
*doorbell
; /* kernel db */
711 u64 udb
; /* user db if any */
712 struct cxio_rdev
*rdev
;
721 DEFINE_DMA_UNMAP_ADDR(mapping
);
722 struct t3_cqe
*queue
;
723 struct t3_cqe
*sw_queue
;
728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
731 struct t3_cq_status_page
{
735 static inline int cxio_cq_in_error(struct t3_cq
*cq
)
737 return ((struct t3_cq_status_page
*)
738 &cq
->queue
[1 << cq
->size_log2
])->cq_err
;
741 static inline void cxio_set_cq_in_error(struct t3_cq
*cq
)
743 ((struct t3_cq_status_page
*)
744 &cq
->queue
[1 << cq
->size_log2
])->cq_err
= 1;
747 static inline void cxio_set_wq_in_error(struct t3_wq
*wq
)
749 wq
->queue
->wq_in_err
.err
|= 1;
752 static inline void cxio_disable_wq_db(struct t3_wq
*wq
)
754 wq
->queue
->wq_in_err
.err
|= 2;
757 static inline void cxio_enable_wq_db(struct t3_wq
*wq
)
759 wq
->queue
->wq_in_err
.err
&= ~2;
762 static inline int cxio_wq_db_enabled(struct t3_wq
*wq
)
764 return !(wq
->queue
->wq_in_err
.err
& 2);
767 static inline struct t3_cqe
*cxio_next_hw_cqe(struct t3_cq
*cq
)
771 cqe
= cq
->queue
+ (Q_PTR2IDX(cq
->rptr
, cq
->size_log2
));
772 if (CQ_VLD_ENTRY(cq
->rptr
, cq
->size_log2
, cqe
))
777 static inline struct t3_cqe
*cxio_next_sw_cqe(struct t3_cq
*cq
)
781 if (!Q_EMPTY(cq
->sw_rptr
, cq
->sw_wptr
)) {
782 cqe
= cq
->sw_queue
+ (Q_PTR2IDX(cq
->sw_rptr
, cq
->size_log2
));
788 static inline struct t3_cqe
*cxio_next_cqe(struct t3_cq
*cq
)
792 if (!Q_EMPTY(cq
->sw_rptr
, cq
->sw_wptr
)) {
793 cqe
= cq
->sw_queue
+ (Q_PTR2IDX(cq
->sw_rptr
, cq
->size_log2
));
796 cqe
= cq
->queue
+ (Q_PTR2IDX(cq
->rptr
, cq
->size_log2
));
797 if (CQ_VLD_ENTRY(cq
->rptr
, cq
->size_log2
, cqe
))