/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

/* Rx Descriptors */
union ice_32byte_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
	} wb; /* writeback */
};

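/* Illustrative sketch only (hypothetical helper, not a driver API): since
 * bit 0 of hdr_addr doubles as the descriptor-done (DD) flag on writeback,
 * software can poll a legacy descriptor for completion like this.
 * le64_to_cpu() comes from the kernel headers this file is built with.
 */
static inline bool ice_32byte_rx_desc_done_example(const union ice_32byte_rx_desc *rx_desc)
{
	return !!(le64_to_cpu(rx_desc->read.hdr_addr) & 0x1ULL);
}
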
struct ice_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:1;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2	= 0,
	ICE_RX_PTYPE_OUTER_IP	= 1,
};

enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE	= 0,
	ICE_RX_PTYPE_OUTER_IPV4	= 1,
	ICE_RX_PTYPE_OUTER_IPV6	= 2,
};

enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG	= 0,
	ICE_RX_PTYPE_FRAG	= 1,
};

enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE		= 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP		= 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
};

enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE	= 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4	= 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6	= 2,
};

enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE		= 0,
	ICE_RX_PTYPE_INNER_PROT_UDP		= 1,
	ICE_RX_PTYPE_INNER_PROT_TCP		= 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP		= 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP		= 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC	= 5,
};

enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
};

/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
			/* bit 0 of hdr_addr is DD bit */
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile id */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */
		/* Qword 1 */
		__le16 status_error0;
		/* Qword 2 */
		__le16 status_error1;
	} wb; /* writeback */
};

/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	__le16 ptype_flexi_flags0;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	__le16 status_error1;
};

/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW)
 */
enum ice_rxdid {
	ICE_RXDID_START		= 0,
	ICE_RXDID_LEGACY_0	= ICE_RXDID_START,
	ICE_RXDID_FLX_START	= 2,
	ICE_RXDID_FLEX_NIC	= ICE_RXDID_FLX_START,
	ICE_RXDID_FLX_LAST	= 63,
	ICE_RXDID_LAST		= ICE_RXDID_FLX_LAST,
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID		0x01

/* Receive Descriptor MDID values */
#define ICE_RX_MDID_FLOW_ID_LOWER	5
#define ICE_RX_MDID_FLOW_ID_HIGH	6
#define ICE_RX_MDID_HASH_LOW		56
#define ICE_RX_MDID_HASH_HIGH		57

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI	= 0,
	ICE_RXFLG_EVLAN_x8100	= 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC	= 22,
};

/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */

/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */

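/* Illustrative sketch only (hypothetical helpers, not a driver API): pulling
 * the hardware packet type and packet length out of a writeback flex
 * descriptor with the masks above.  le16_to_cpu() comes from the kernel
 * headers this file is normally built with.
 */
static inline u16 ice_rx_flex_desc_ptype_example(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
	       ICE_RX_FLEX_DESC_PTYPE_M;
}

static inline u16 ice_rx_flex_desc_pkt_len_example(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M;
}
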
enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};

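/* Illustrative sketch only (hypothetical helper, not a driver API): testing
 * one of the status_error0 bits above on a writeback flex descriptor, for
 * example the descriptor-done bit:
 *	ice_rx_flex_desc_status_example(desc, ICE_RX_FLEX_DESC_STATUS0_DD_S)
 * BIT() and le16_to_cpu() come from the kernel headers this file is built with.
 */
static inline bool ice_rx_flex_desc_status_example(const union ice_32b_rx_flex_desc *rx_desc,
						   enum ice_rx_flex_desc_status_error_0_bits bit)
{
	return !!(le16_to_cpu(rx_desc->wb.status_error0) & BIT(bit));
}
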
#define ICE_RXQ_CTX_SIZE_DWORDS		8
#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 cpuid; /* bigger than needed, see above for reason */
#define ICE_RLAN_BASE_S 7
	u64 base;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u32 rxmax; /* bigger than needed, see above for reason */
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};

#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) {	\
	.offset = offsetof(struct _struct, _ele),	\
	.size_of = FIELD_SIZEOF(struct _struct, _ele),	\
	.width = _width,				\
	.lsb = _lsb,					\
}

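/* Illustrative sketch only: ICE_CTX_STORE is meant to fill one entry of a
 * context-element table (struct ice_ctx_ele, declared elsewhere in the
 * driver) that records where each ice_rlan_ctx/ice_tlan_ctx member lives in
 * the packed hardware context.  A hypothetical table line would look like
 * the following; the width/LSB values are placeholders, not the real
 * hardware layout:
 *
 *	static const struct ice_ctx_ele rlan_ctx_info_example[] = {
 *		ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 84),
 *		{ 0 }
 *	};
 */
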
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP		= 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP	= 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP		= 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT		= 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2		= 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS	= 2,
};

/* Tx Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	__le64 cmd_type_offset_bsz;
};

enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA		= 0x0,
	ICE_TX_DESC_DTYPE_CTX		= 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE	= 0xF,
};

#define ICE_TXD_QW1_CMD_S	4
#define ICE_TXD_QW1_CMD_M	(0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP		= 0x0001,
	ICE_TX_DESC_CMD_RS		= 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1		= 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6	= 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4	= 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM	= 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP	= 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP	= 0x0300, /* 2 BITS */
};

#define ICE_TXD_QW1_OFFSET_S	16
#define ICE_TXD_QW1_OFFSET_M	(0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S	= 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S		= 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S	= 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M	(0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M	(0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M	(0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes */
#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
			     ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
			    ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
			    ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S	34
#define ICE_TXD_QW1_L2TAG1_S	48

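/* Illustrative sketch only (hypothetical helper, not a driver API): how the
 * command, offset, buffer-size and L2 tag fields could be packed into
 * cmd_type_offset_bsz using the shifts above.  All parameters are
 * caller-supplied values assumed to already be range-checked.
 */
static inline __le64 ice_build_ctob_example(u64 td_cmd, u64 td_offset,
					    unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}
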
/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S	4
#define ICE_TXD_CTX_QW1_CMD_M	(0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S	30
#define ICE_TXD_CTX_QW1_TSO_LEN_M	\
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S	50

enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO		= 0x01,
	ICE_TX_CTX_DESC_TSYN		= 0x02,
	ICE_TX_CTX_DESC_IL2TAG2		= 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI	= 0x30,
	ICE_TX_CTX_DESC_RESERVED	= 0x40
};

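/* Illustrative sketch only (hypothetical helper, not a driver API): packing
 * the qw1 word of a TSO context descriptor from the shifts and command bits
 * above.  'tso_len' and 'mss' are caller-supplied values.
 */
static inline __le64 ice_build_tso_ctx_qw1_example(u32 tso_len, u16 mss)
{
	u64 qw1 = ICE_TX_DESC_DTYPE_CTX;

	qw1 |= (u64)ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S;
	qw1 |= ((u64)tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) &
	       ICE_TXD_CTX_QW1_TSO_LEN_M;
	qw1 |= (u64)mss << ICE_TXD_CTX_QW1_MSS_S;

	return cpu_to_le64(qw1);
}
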
#define ICE_LAN_TXQ_MAX_QGRPS	127
#define ICE_LAN_TXQ_MAX_QDIS	1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S	7
	u64 base;		/* base is defined in 128-byte units */
	u16 cgd_num;		/* bigger than needed, see above for reason */
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
#define ICE_TLAN_CTX_VMVF_TYPE_PF	2
	u16 cpuid;		/* bigger than needed, see above for reason */
	u8 itr_notification_mode;
	u32 qlen;		/* bigger than needed, see above for reason */
	u8 pkt_shaper_prof_idx;
	u8 int_q_state;		/* width not needed - internal do not write */
};

/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define ICE_RX_PTYPE_NOF	ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};

static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
	return ice_ptype_lkup[ptype];
}

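/* Illustrative sketch only (hypothetical helper, not a driver API): a caller
 * would normally decode the 10-bit hardware PTYPE taken from the Rx
 * descriptor and then branch on the decoded fields, for example to decide
 * whether an inner TCP header is present.
 */
static inline bool ice_ptype_is_tcp_example(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	return decoded.known &&
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
}
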
#endif /* _ICE_LAN_TX_RX_H_ */