/* This file contains HW queue descriptor formats, config register
 * structures etc
 *
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef Q_STRUCT_H
#define Q_STRUCT_H
/* Load transaction types for reading segment bytes specified by
 * NIC_SEND_GATHER_S[LD_TYPE].
 */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD = 0x0,
	NIC_SEND_LD_TYPE_E_LDT = 0x1,
	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
};
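/* Illustrative sketch: LDWB lets the chip invalidate the segment's cache
 * lines after the DMA read, which suits buffers that are freed right after
 * transmit; LDD is the ordinary allocating load. This is a hypothetical
 * policy helper, reading the semantics off the LDD/LDT/LDWB names.
 */
static inline enum nic_send_ld_type_e
nic_send_ld_type(bool buf_is_dead_after_xmit)
{
	return buf_is_dead_after_xmit ? NIC_SEND_LD_TYPE_E_LDWB :
					NIC_SEND_LD_TYPE_E_LDD;
}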
enum ether_type_algorithm {
	ETYPE_ALG_NONE = 0x0,
	ETYPE_ALG_SKIP = 0x1,
	ETYPE_ALG_ENDPARSE = 0x2,
	ETYPE_ALG_VLAN = 0x3,
	ETYPE_ALG_VLAN_STRIP = 0x4,
};
enum layer3_type {
	L3TYPE_NONE = 0x00,
	L3TYPE_GRH = 0x01,
	L3TYPE_IPV4 = 0x04,
	L3TYPE_IPV4_OPTIONS = 0x05,
	L3TYPE_IPV6 = 0x06,
	L3TYPE_IPV6_OPTIONS = 0x07,
	L3TYPE_ET_STOP = 0x0D,
	L3TYPE_OTHER = 0x0E,
};
enum layer4_type {
	L4TYPE_NONE = 0x00,
	L4TYPE_IPSEC_ESP = 0x01,
	L4TYPE_IPFRAG = 0x02,
	L4TYPE_IPCOMP = 0x03,
	L4TYPE_TCP = 0x04,
	L4TYPE_UDP = 0x05,
	L4TYPE_SCTP = 0x06,
	L4TYPE_GRE = 0x07,
	L4TYPE_ROCE_BTH = 0x08,
	L4TYPE_OTHER = 0x0E,
};
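/* Illustrative sketch: which parsed l4_type values carry a checksum the
 * hardware can verify (a hypothetical predicate assuming TCP/UDP/SCTP).
 */
static inline bool nic_l4type_has_csum(u8 l4_type)
{
	return l4_type == L4TYPE_TCP || l4_type == L4TYPE_UDP ||
	       l4_type == L4TYPE_SCTP;
}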
/* CPI and RSSI configuration */
enum cpi_algorithm_type {
	CPI_ALG_NONE = 0x0,
	CPI_ALG_VLAN = 0x1,
	CPI_ALG_VLAN16 = 0x2,
	CPI_ALG_DIFF = 0x3,
};
enum rss_algorithm_type {
	RSS_ALG_NONE = 0x00,
	RSS_ALG_PORT = 0x01,
	RSS_ALG_IP = 0x02,
	RSS_ALG_TCP_IP = 0x03,
	RSS_ALG_UDP_IP = 0x04,
	RSS_ALG_SCTP_IP = 0x05,
	RSS_ALG_GRE_IP = 0x06,
	RSS_ALG_ROCE = 0x07,
};
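/* Illustrative sketch: picking the RSS algorithm from the flow type a
 * driver wants hashed (a hypothetical helper, not part of the hardware
 * interface).
 */
static inline enum rss_algorithm_type
nic_rss_alg(bool hash_l4, bool is_tcp)
{
	if (!hash_l4)
		return RSS_ALG_IP;
	return is_tcp ? RSS_ALG_TCP_IP : RSS_ALG_UDP_IP;
}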
enum rss_hash_cfg {
	RSS_HASH_L2ETC = 0x00,
	RSS_HASH_IP = 0x01,
	RSS_HASH_TCP = 0x02,
	RSS_HASH_TCP_SYN_DIS = 0x03,
	RSS_HASH_UDP = 0x04,
	RSS_HASH_L4ETC = 0x05,
	RSS_HASH_ROCE = 0x06,
	RSS_L3_BIDI = 0x07,
	RSS_L4_BIDI = 0x08,
};
/* Completion queue entry types */
enum cqe_type {
	CQE_TYPE_INVALID = 0x0,
	CQE_TYPE_RX = 0x2,
	CQE_TYPE_RX_SPLIT = 0x3,
	CQE_TYPE_RX_TCP = 0x4,
	CQE_TYPE_SEND = 0x8,
	CQE_TYPE_SEND_PTP = 0x9,
};
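/* Illustrative sketch: CQ processing dispatches on the cqe_type value of
 * each entry; this hypothetical predicate groups the receive variants.
 */
static inline bool cqe_is_rx(u8 cqe_type)
{
	return cqe_type == CQE_TYPE_RX || cqe_type == CQE_TYPE_RX_SPLIT ||
	       cqe_type == CQE_TYPE_RX_TCP;
}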
enum cqe_rx_tcp_status {
	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
enum cqe_send_status {
	CQE_SEND_STATUS_GOOD = 0x00,
	CQE_SEND_STATUS_DESC_FAULT = 0x01,
	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
	CQE_SEND_STATUS_DATA_FAULT = 0x86,
	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
	CQE_SEND_STATUS_MEM_FAULT = 0x89,
	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
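/* Illustrative sketch: anything other than GOOD means the packet was not
 * sent cleanly; judging by the value grouping above, the 0x8x codes
 * describe per-packet data-path faults and the lower codes descriptor or
 * header consistency problems (an assumption, not a statement from the
 * hardware manual).
 */
static inline bool cqe_send_failed(u8 send_status)
{
	return send_status != CQE_SEND_STATUS_GOOD;
}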
enum cqe_rx_tcp_end_reason {
	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
	CQE_RX_TCP_END_INVALID_FLAG = 1,
	CQE_RX_TCP_END_TIMEOUT = 2,
	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
	CQE_RX_TCP_END_PKT_ERR = 4,
	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
	CQE_RX_ERRLVL_RE = 0x0,
	CQE_RX_ERRLVL_L2 = 0x1,
	CQE_RX_ERRLVL_L3 = 0x2,
	CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration */
enum cqe_rx_err_opcode {
	CQE_RX_ERR_RE_NONE = 0x0,
	CQE_RX_ERR_RE_PARTIAL = 0x1,
	CQE_RX_ERR_RE_JABBER = 0x2,
	CQE_RX_ERR_RE_FCS = 0x7,
	CQE_RX_ERR_RE_TERMINATE = 0x9,
	CQE_RX_ERR_RE_RX_CTL = 0xb,
	CQE_RX_ERR_PREL2_ERR = 0x1f,
	CQE_RX_ERR_L2_FRAGMENT = 0x20,
	CQE_RX_ERR_L2_OVERRUN = 0x21,
	CQE_RX_ERR_L2_PFCS = 0x22,
	CQE_RX_ERR_L2_PUNY = 0x23,
	CQE_RX_ERR_L2_MAL = 0x24,
	CQE_RX_ERR_L2_OVERSIZE = 0x25,
	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
	CQE_RX_ERR_L2_LENMISM = 0x27,
	CQE_RX_ERR_L2_PCLP = 0x28,
	CQE_RX_ERR_IP_NOT = 0x41,
	CQE_RX_ERR_IP_CHK = 0x42,
	CQE_RX_ERR_IP_MAL = 0x43,
	CQE_RX_ERR_IP_MALD = 0x44,
	CQE_RX_ERR_IP_HOP = 0x45,
	CQE_RX_ERR_L3_ICRC = 0x46,
	CQE_RX_ERR_L3_PCLP = 0x47,
	CQE_RX_ERR_L4_MAL = 0x61,
	CQE_RX_ERR_L4_CHK = 0x62,
	CQE_RX_ERR_UDP_LEN = 0x63,
	CQE_RX_ERR_L4_PORT = 0x64,
	CQE_RX_ERR_TCP_FLAG = 0x65,
	CQE_RX_ERR_TCP_OFFSET = 0x66,
	CQE_RX_ERR_L4_PCLP = 0x67,
	CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
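/* Illustrative sketch: the opcode space is carved up by protocol layer,
 * mirroring enum cqe_rx_err_level. The ranges are read off the values
 * above; CQE_RX_ERR_RBDR_TRUNC falls outside them and defaults to RE here.
 */
static inline enum cqe_rx_err_level cqe_rx_err_level_of(u8 opcode)
{
	if (opcode >= CQE_RX_ERR_L4_MAL && opcode <= CQE_RX_ERR_L4_PCLP)
		return CQE_RX_ERRLVL_L4;
	if (opcode >= CQE_RX_ERR_IP_NOT && opcode <= CQE_RX_ERR_L3_PCLP)
		return CQE_RX_ERRLVL_L3;
	if (opcode >= CQE_RX_ERR_L2_FRAGMENT && opcode <= CQE_RX_ERR_L2_PCLP)
		return CQE_RX_ERRLVL_L2;
	return CQE_RX_ERRLVL_RE;
}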
struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 vlan2_stripped:1;
	u64 pkt_len:16; /* W1 */
	u64 rss_tag:32; /* W2 */
	u64 rb3_sz:16; /* W3 */
	u64 rb7_sz:16; /* W4 */
	u64 rb11_sz:16; /* W5 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 vlan2_stripped:1;
	u64 cqe_type:4; /* W0 */
	u64 pkt_len:16; /* W1 */
	u64 rss_tag:32; /* W2 */
	u64 rb3_sz:16; /* W3 */
	u64 rb7_sz:16; /* W4 */
	u64 rb11_sz:16; /* W5 */
#endif
};
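/* Illustrative sketch: a hypothetical accessor; pkt_len (W1) carries the
 * total frame length, while the rbN_sz fields report how many bytes landed
 * in each receive buffer of a scattered frame.
 */
static inline u16 cqe_rx_frame_len(const struct cqe_rx_t *cqe)
{
	return cqe->pkt_len;
}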
struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd1:4; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 rsvd1:4; /* W1 */
	u64 cqe_type:4; /* W0 */
#endif
};
struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd1:32; /* W1 */
	u64 tcp_cntx_bytes:8;
	u64 tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 tcp_err_bytes:16;
	u64 tcp_cntx_bytes:8;
	u64 rsvd1:32; /* W1 */
#endif
};
struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 ptp_timestamp:64; /* W1 */
#endif
};
union cq_entry_type_t {
	struct cqe_send_t snd_hdr;
	struct cqe_rx_t rx_hdr;
	struct cqe_rx_tcp_t rx_tcp_hdr;
	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
};
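/* Illustrative sketch: in the full hardware layout every completion
 * variant begins with the same cqe_type nibble, so an entry can be
 * classified through any member before choosing a view ('cq_entry_type'
 * is a hypothetical helper).
 */
static inline int cq_entry_type(const union cq_entry_type_t *entry)
{
	return entry->rx_hdr.cqe_type;
}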
struct rbdr_entry_t {
	u64 buf_addr:64;
};
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 align_hdr_bytes:4;
	u64 align_ptr_bytes:4;
	u64 tcp_end_reason:2;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_end_reason:2;
	u64 align_ptr_bytes:4;
	u64 align_hdr_bytes:4;
#endif
};
/* Always Big endian */
struct rx_hdr_t {
	u64 disable_tcp_reassembly:1;
};
enum send_l4_csum_type {
	SEND_L4_CSUM_DISABLE = 0x00,
	SEND_L4_CSUM_UDP = 0x01,
	SEND_L4_CSUM_TCP = 0x02,
	SEND_L4_CSUM_SCTP = 0x03,
};
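/* Illustrative sketch: mapping an IP protocol number to the checksum type
 * requested in the send header ('nic_csum_for_proto' is hypothetical).
 */
static inline enum send_l4_csum_type nic_csum_for_proto(u8 proto)
{
	switch (proto) {
	case 6:   /* IPPROTO_TCP */
		return SEND_L4_CSUM_TCP;
	case 17:  /* IPPROTO_UDP */
		return SEND_L4_CSUM_UDP;
	case 132: /* IPPROTO_SCTP */
		return SEND_L4_CSUM_SCTP;
	}
	return SEND_L4_CSUM_DISABLE;
}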
enum send_crc_alg {
	SEND_CRCALG_CRC32 = 0x00,
	SEND_CRCALG_CRC32C = 0x01,
	SEND_CRCALG_ICRC = 0x02,
};
enum send_load_type {
	SEND_LD_TYPE_LDD = 0x00,
	SEND_LD_TYPE_LDT = 0x01,
	SEND_LD_TYPE_LDWB = 0x02,
};
enum send_mem_alg_type {
	SEND_MEMALG_SET = 0x00,
	SEND_MEMALG_ADD = 0x08,
	SEND_MEMALG_SUB = 0x09,
	SEND_MEMALG_ADDLEN = 0x0A,
	SEND_MEMALG_SUBLEN = 0x0B,
};
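/* Illustrative sketch: a software model of the memory-subdescriptor update
 * algorithms, with ADDLEN/SUBLEN assumed to fold in the packet length as
 * their names suggest ('mem', 'operand' and 'pkt_len' are hypothetical).
 */
static inline u64 send_memalg_model(u8 alg, u64 mem, u64 operand, u64 pkt_len)
{
	switch (alg) {
	case SEND_MEMALG_SET:
		return operand;
	case SEND_MEMALG_ADD:
		return mem + operand;
	case SEND_MEMALG_SUB:
		return mem - operand;
	case SEND_MEMALG_ADDLEN:
		return mem + operand + pkt_len;
	case SEND_MEMALG_SUBLEN:
		return mem - operand - pkt_len;
	}
	return mem;
}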
enum send_mem_dsz_type {
	SEND_MEMDSZ_B64 = 0x00,
	SEND_MEMDSZ_B32 = 0x01,
	SEND_MEMDSZ_B8 = 0x03,
};
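/* Illustrative sketch: operand width implied by each SEND_MEMDSZ_* value
 * (a hypothetical helper; the byte sizes follow from the B64/B32/B8 names).
 */
static inline unsigned int send_memdsz_bytes(u8 dsz)
{
	switch (dsz) {
	case SEND_MEMDSZ_B64:
		return 8;
	case SEND_MEMDSZ_B32:
		return 4;
	case SEND_MEMDSZ_B8:
		return 1;
	}
	return 0;
}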
enum sq_subdesc_type {
	SQ_DESC_TYPE_INVALID = 0x00,
	SQ_DESC_TYPE_HEADER = 0x01,
	SQ_DESC_TYPE_CRC = 0x02,
	SQ_DESC_TYPE_IMMEDIATE = 0x03,
	SQ_DESC_TYPE_GATHER = 0x04,
	SQ_DESC_TYPE_MEMORY = 0x05,
};
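/* Illustrative sketch: a minimal transmit request is one HEADER
 * subdescriptor followed by a GATHER subdescriptor per fragment; IMMEDIATE
 * can stand in for GATHER on tiny payloads and MEMORY appends a completion
 * write-back (an assumed typical layout, not a rule stated here).
 */
static inline int sq_min_subdesc_cnt(int nr_frags)
{
	return 1 + nr_frags; /* HEADER + one GATHER per fragment */
}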
struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 crc_insert_pos:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 crc_insert_pos:16;
#endif
};
struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 ld_type:2;
	u64 rsvd0:42;
	u64 size:16;

	u64 rsvd1:15; /* W1 */
	u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 size:16;
	u64 rsvd0:42;
	u64 ld_type:2;
	u64 subdesc_type:4; /* W0 */

	u64 addr:49;
	u64 rsvd1:15; /* W1 */
#endif
};
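/* Illustrative sketch: pointing a gather subdescriptor at one DMA-mapped
 * fragment ('sq_gather_fill', 'dma_addr' and 'len' are hypothetical).
 */
static inline void sq_gather_fill(struct sq_gather_subdesc *gather,
				  u64 dma_addr, u16 len)
{
	*gather = (struct sq_gather_subdesc){ 0 };
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = len;
	gather->addr = dma_addr;
}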
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 rsvd0:46;
	u64 len:14;

	u64 data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 len:14;
	u64 rsvd0:46;
	u64 subdesc_type:4; /* W0 */

	u64 data:64; /* W1 */
#endif
};
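/* Illustrative sketch: embedding a small payload directly in the
 * subdescriptor instead of referencing a buffer ('sq_imm_fill' is
 * hypothetical; len is in bytes, at most 8 for the 64-bit data word).
 */
static inline void sq_imm_fill(struct sq_imm_subdesc *imm, u64 payload, u8 len)
{
	*imm = (struct sq_imm_subdesc){ 0 };
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = len;
	imm->data = payload;
}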
struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 rsvd1:15; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 rsvd1:15; /* W1 */
#endif
};
struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 tot_len:20; /* W0 */

	u64 rsvd2:24;
	u64 inner_l4_offset:8;
	u64 inner_l3_offset:8;
	u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tot_len:20;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 subdesc_type:4; /* W0 */

	u64 tso_max_paysize:14;
	u64 inner_l3_offset:8;
	u64 inner_l4_offset:8;
	u64 rsvd2:24; /* W1 */
#endif
};
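/* Illustrative sketch: stamping the always-needed header fields for a
 * non-TSO packet ('sq_hdr_fill' is hypothetical; everything else is left
 * zero).
 */
static inline void sq_hdr_fill(struct sq_hdr_subdesc *hdr, u16 tot_len)
{
	*hdr = (struct sq_hdr_subdesc){ 0 };
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	hdr->post_cqe = 1; /* post a CQE even on success */
	hdr->tot_len = tot_len;
}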
/* Queue config register formats */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_2_63:62;
	u64 ena:1;
	u64 tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_ena:1;
	u64 ena:1;
	u64 reserved_2_63:62;
#endif
};
struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_43_63:21;
	u64 reserved_35_39:5;
	u64 reserved_25_31:7;
	u64 reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 reserved_0_15:16;
	u64 reserved_25_31:7;
	u64 reserved_35_39:5;
	u64 reserved_43_63:21;
#endif
};
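/* Illustrative sketch: each *_cfg struct here is a software image of one
 * 64-bit CSR; a driver typically fills the struct and writes the raw word
 * to the register. The union pun below is the usual mechanism (register
 * accessors themselves are not part of this header).
 */
static inline u64 cq_cfg_to_u64(struct cq_cfg cfg)
{
	union {
		struct cq_cfg s;
		u64 u;
	} w = { .s = cfg };

	return w.u;
}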
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_32_63:32;
	u64 reserved_20_23:4;
	u64 reserved_18_18:1;
	u64 reserved_11_15:5;
	u64 tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tstmp_bgx_intf:3;
	u64 reserved_11_15:5;
	u64 reserved_18_18:1;
	u64 reserved_20_23:4;
	u64 reserved_32_63:32;
#endif
};
struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_45_63:19;
	u64 reserved_36_41:6;
	u64 reserved_25_31:7;
	u64 reserved_12_15:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 reserved_12_15:4;
	u64 reserved_25_31:7;
	u64 reserved_36_41:6;
	u64 reserved_45_63:19;
#endif
};
struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_32_63:32;
	u64 reserved_27_30:4;
	u64 lock_viol_cqe_ena:1;
	u64 send_tstmp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 send_tstmp_ena:1;
	u64 lock_viol_cqe_ena:1;
	u64 reserved_27_30:4;
	u64 reserved_32_63:32;
#endif
};
#endif /* Q_STRUCT_H */