/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#ifndef _CQ_EXCH_DESC_H_
#define _CQ_EXCH_DESC_H_

#include "cq_desc.h"

/* Exchange completion queue descriptor: 16B */
struct cq_exch_wq_desc {
	u16 completed_index;
	u16 q_number;
	u16 exchange_id;
	u8  tmpl;
	u8  reserved0;
	u32 reserved1;
	u8  exch_status;
	u8  reserved2[2];
	u8  type_color;
};

#define CQ_EXCH_WQ_STATUS_BITS      2
#define CQ_EXCH_WQ_STATUS_MASK      ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)

enum cq_exch_status_types {
	CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
	CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
	CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
	CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
};

static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
					u8  *type,
					u8  *color,
					u16 *q_number,
					u16 *completed_index,
					u8  *exch_status)
{
	cq_desc_dec((struct cq_desc *)desc_ptr, type,
		    color, q_number, completed_index);
	*exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
}

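/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical caller decoding an exchange WQ completion.  The function
 * name and the handling of the status value are assumptions.
 */
#if 0
static void example_handle_exch_wq_cqe(struct cq_exch_wq_desc *desc)
{
	u8 type, color, exch_status;
	u16 q_number, completed_index;

	cq_exch_wq_desc_dec(desc, &type, &color, &q_number,
			    &completed_index, &exch_status);

	if (exch_status == CQ_EXCH_WQ_STATUS_TYPE_ABORT) {
		/* e.g. clean up the aborted exchange */
	}
}
#endif
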
/* FCP RQ completion queue descriptor: 16B */
struct cq_fcp_rq_desc {
	u16 completed_index_eop_sop_prt;
	u16 q_number;
	u16 exchange_id;
	u16 tmpl;
	u16 bytes_written;
	u16 vlan;
	u8  sof;
	u8  eof;
	u8  fcs_fer_fck;
	u8  type_color;
};

#define CQ_FCP_RQ_DESC_FLAGS_SOP		(1 << 15)
#define CQ_FCP_RQ_DESC_FLAGS_EOP		(1 << 14)
#define CQ_FCP_RQ_DESC_FLAGS_PRT		(1 << 12)
#define CQ_FCP_RQ_DESC_TMPL_MASK		0x1f
#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK	0x3fff
#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT		14
#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT	15
#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK		0x1
#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT		1
#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT		7
#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)

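/*
 * Summary of the bit layout implied by the masks above (added note, not
 * part of the original header):
 *   completed_index_eop_sop_prt: bit 15 = SOP, bit 14 = EOP, bit 12 = PRT,
 *                                remaining low bits = completed index
 *                                (decoded by cq_desc_dec)
 *   bytes_written:               bit 15 = VLAN stripped, bit 14 = packet
 *                                error, bits 13:0 = bytes written
 *   fcs_fer_fck:                 bit 7 = FCS ok, bit 1 = FCoE error,
 *                                bit 0 = FC CRC ok
 */
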
static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
				       u8  *type,
				       u8  *color,
				       u16 *q_number,
				       u16 *completed_index,
				       u8  *eop,
				       u8  *sop,
				       u8  *fck,
				       u16 *exchange_id,
				       u16 *tmpl,
				       u32 *bytes_written,
				       u8  *sof,
				       u8  *eof,
				       u8  *ingress_port,
				       u8  *packet_err,
				       u8  *fcoe_err,
				       u8  *fcs_ok,
				       u8  *vlan_stripped,
				       u16 *vlan)
{
	cq_desc_dec((struct cq_desc *)desc_ptr, type,
		    color, q_number, completed_index);
	*eop = (desc_ptr->completed_index_eop_sop_prt &
		CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
	*sop = (desc_ptr->completed_index_eop_sop_prt &
		CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
	*ingress_port =
		(desc_ptr->completed_index_eop_sop_prt &
		 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
	*exchange_id = desc_ptr->exchange_id;
	*tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
	*bytes_written =
		desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
	*packet_err =
		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
		CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
	*vlan_stripped =
		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
		CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
	*vlan = desc_ptr->vlan;
	*sof = desc_ptr->sof;
	*fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
	*fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
		CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
	*eof = desc_ptr->eof;
	*fcs_ok =
		(desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
		CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
}

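/*
 * Usage sketch (illustrative, not part of the original header): decoding
 * an FCP RQ completion and checking the per-frame status bits.  The
 * function name and the error policy are assumptions.
 */
#if 0
static int example_check_fcp_rq_cqe(struct cq_fcp_rq_desc *desc)
{
	u8 type, color, eop, sop, fck, sof, eof;
	u8 ingress_port, packet_err, fcoe_err, fcs_ok, vlan_stripped;
	u16 q_number, completed_index, exchange_id, tmpl, vlan;
	u32 bytes_written;

	cq_fcp_rq_desc_dec(desc, &type, &color, &q_number, &completed_index,
			   &eop, &sop, &fck, &exchange_id, &tmpl,
			   &bytes_written, &sof, &eof, &ingress_port,
			   &packet_err, &fcoe_err, &fcs_ok, &vlan_stripped,
			   &vlan);

	/* a frame is usable only if no FCS/packet/FCoE error was reported */
	return (fcs_ok && !packet_err && !fcoe_err) ? 0 : -1;
}
#endif
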
/* SGL completion queue descriptor: 16B */
struct cq_sgl_desc {
	u16 exchange_id;
	u16 q_number;
	u32 active_burst_offset;
	u32 tot_data_bytes;
	u16 tmpl;
	u8  sgl_err;
	u8  type_color;
};

enum cq_sgl_err_types {
	CQ_SGL_ERR_NO_ERROR = 0,
	CQ_SGL_ERR_OVERFLOW,         /* data ran beyond end of SGL */
	CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal */
	CQ_SGL_ERR_ADDR_RSP_ERR,     /* sgl address error */
	CQ_SGL_ERR_DATA_RSP_ERR,     /* sgl data rsp error */
	CQ_SGL_ERR_CNT_ZERO_ERR,     /* SGL count is 0 */
	CQ_SGL_ERR_CNT_MAX_ERR,      /* SGL count is larger than supported */
	CQ_SGL_ERR_ORDER_ERR,        /* frames recv on both ports, order err */
	CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
	CQ_SGL_ERR_HOST_CQ_ERR,      /* host cq entry to local vnic addr ill */
};

#define CQ_SGL_SGL_ERR_MASK	0x1f
#define CQ_SGL_TMPL_MASK	0x1f

static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
				   u8  *type,
				   u8  *color,
				   u16 *q_number,
				   u16 *exchange_id,
				   u32 *active_burst_offset,
				   u32 *tot_data_bytes,
				   u16 *tmpl,
				   u8  *sgl_err)
{
	/* Cheat a little by assuming exchange_id is the same as completed
	   index */
	cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
		    exchange_id);
	*active_burst_offset = desc_ptr->active_burst_offset;
	*tot_data_bytes = desc_ptr->tot_data_bytes;
	*tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
	*sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
}

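/*
 * Usage sketch (illustrative, not part of the original header): decoding
 * an SGL completion and checking for SGL errors.  The function name is an
 * assumption.
 */
#if 0
static int example_check_sgl_cqe(struct cq_sgl_desc *desc)
{
	u8 type, color, sgl_err;
	u16 q_number, exchange_id, tmpl;
	u32 active_burst_offset, tot_data_bytes;

	cq_sgl_desc_dec(desc, &type, &color, &q_number, &exchange_id,
			&active_burst_offset, &tot_data_bytes, &tmpl,
			&sgl_err);

	return (sgl_err == CQ_SGL_ERR_NO_ERROR) ? 0 : -1;
}
#endif
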
#endif /* _CQ_EXCH_DESC_H_ */