// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"

#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
	.vecs = HW_ATL_A0_RSS_MAX, \
	.tcs_max = HW_ATL_A0_TC_MAX, \
	.rxd_alignment = 1U, \
	.rxd_size = HW_ATL_A0_RXD_SIZE, \
	.rxds_max = HW_ATL_A0_MAX_RXD, \
	.rxds_min = HW_ATL_A0_MIN_RXD, \
	.txd_alignment = 1U, \
	.txd_size = HW_ATL_A0_TXD_SIZE, \
	.txds_max = HW_ATL_A0_MAX_TXD, \
	.txds_min = HW_ATL_A0_MIN_TXD, \
	.txhwb_alignment = 4096U, \
	.tx_rings = HW_ATL_A0_TX_RINGS, \
	.rx_rings = HW_ATL_A0_RX_RINGS, \
	.hw_features = NETIF_F_HW_CSUM | \
		       NETIF_F_HW_VLAN_CTAG_FILTER, \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true, \
	.mtu = HW_ATL_A0_MTU_JUMBO, \
	.mac_regs_count = 88, \
	.hw_alive_check_addr = 0x10U

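/* Per-board capability tables. Each AQC variant below combines the shared
 * DEFAULT_A0_BOARD_BASIC_CAPABILITIES macro with its media type and the
 * link-rate mask the board supports.
 */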
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
	DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

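/* Full device reset. Register-reset is kept disabled for the global block
 * but enabled for PCI/Rx/Tx (presumably so global registers survive the soft
 * reset), then a global soft reset is issued and the soft-reset and ITR-reset
 * bits are polled (10 times by 1 ms) until they self-clear. Finally the
 * firmware is moved to the MPI_RESET state.
 */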
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;
	u32 val;

	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);

	hw_atl_glb_soft_res_set(self, 1);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get,
					self, val, val == 0,
					1000U, 10000U);
	if (err)
		return err;

	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
	hw_atl_itr_res_irq_set(self, 1U);

	/* check 10 times by 1ms */
	err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get,
					self, val, val == 0,
					1000U, 10000U);
	if (err)
		return err;

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}

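/* QoS defaults for a single traffic class: simple (non-weighted) arbiters,
 * full data credits, and Tx/Rx packet-buffer watermarks at roughly 66% (high)
 * and 50% (low) of the per-TC buffer size.
 */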
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
	bool is_rx_flow_control = false;
	unsigned int i_priority = 0U;
	u32 buff_size = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);

	/* Tx buf size */
	buff_size = HW_ATL_A0_TXBUF_MAX;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 50U) /
						   100U, tc);

	/* QoS Rx buf size per TC */
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req);
	buff_size = HW_ATL_A0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}

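/* Program the RSS hash secret key. The ten key dwords are written in reverse
 * order and byte-swapped through the key wr_data/addr/wr_en interface,
 * polling wr_en until the hardware latches each word; an all-zero key is
 * written when RSS is disabled.
 */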
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int addr = 0U;
	unsigned int i = 0U;
	int err = 0;
	u32 val;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

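/* Program the RSS redirection table. Each of the
 * HW_ATL_A0_RSS_REDIRECTION_MAX entries is a 3-bit
 * (HW_ATL_A0_RSS_REDIRECTION_BITS) queue index; the entries are packed
 * back-to-back into the u16 bitary[] array and written out word by word via
 * the same wr_data/addr/wr_en polling handshake used for the hash key.
 */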
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u8 *indirection_table = rss_params->indirection_table;
	u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
			HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
	int err = 0;
	u32 i = 0U;
	u32 val;

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			 ((i * 3U) & 0xFU));
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	/* TX checksums offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					  0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_A0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err = 0;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

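/* One-time bring-up: init the Tx/Rx datapaths, program the MAC address, link
 * speed, QoS, RSS and offloads, then unmask interrupts.
 * aq_hw_atl_igcr_table_ maps the interrupt type (legacy/MSI/MSI-X) and
 * single- vs multi-vector operation to the global interrupt control value.
 */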
static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
	};
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	int err = 0;

	hw_atl_a0_hw_init_tx_path(self);
	hw_atl_a0_hw_init_rx_path(self);

	hw_atl_a0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

	hw_atl_a0_hw_qos_set(self);

	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
				   [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
				   ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
				   ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
				   ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return 0;
}

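/* Fill hardware Tx descriptors from the ring's buffer metadata. A TSO packet
 * is described by a leading context descriptor (TXC) carrying MSS and header
 * lengths, followed by data descriptors (TXD); an EOP descriptor also
 * requests writeback so completion shows up in hw_ring_tx_head_update.
 */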
static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int frag_count = 0U;
	unsigned int pkt_len = 0U;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_A0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_gso_tcp) {
			txd->ctl |= (buff->len_l3 << 31) |
				    (buff->len_l2 << 24) |
				    HW_ATL_A0_TXD_CTL_CMD_TCP |
				    HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				     (buff->len_l4 << 8) |
				     (buff->len_l3 >> 1);

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;

			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
		} else {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
				     ((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
				is_gso = false;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_a0_hw_tx_ring_tail_update(self, ring);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw,
						  aq_ring->idx);

	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);

	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
	     sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_A0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
	int err = 0;

	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

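/* Walk receive writeback descriptors from hw_head to sw_tail. Status bits
 * 0x5 mark a completed descriptor; the type/status fields carry checksum
 * validity, the RSS hash and EOP, and multi-descriptor (jumbo) packets are
 * chained through buff->next. The not-done branch contains what appears to
 * be an A0 workaround that restarts a stalled descriptor ring.
 */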
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	for (; ring->hw_head != ring->sw_tail;
	     ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
			if ((1U << 4) &
			    hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
				hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
				hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
				hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
				hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
			}

			if (ring->hw_head ||
			    (hw_atl_rdm_rx_desc_head_ptr_get(self,
							     ring->idx) < 2U)) {
				break;
			} else if (!(rxd_wb->status & 0x1U)) {
				struct hw_atl_rxd_wb_s *rxd_wb1 =
					(struct hw_atl_rxd_wb_s *)
					(&ring->dx_ring[(1U) *
						HW_ATL_A0_RXD_SIZE]);

				if ((rxd_wb1->status & 0x1U)) {
					rxd_wb->pkt_len = 1514U;
					rxd_wb->status = 3U;
				} else {
					break;
				}
			}
		}

		buff = &ring->buff_ring[ring->hw_head];

		if (0x3U != (rxd_wb->status & 0x3U))
			rxd_wb->status |= 4;

		is_err = (0x0000001CU & rxd_wb->status);
		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;
			}
		}

		is_err &= ~0x18U;
		is_err &= ~0x04U;

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */
			buff->is_error = 1U;
		} else {
			if (self->aq_nic_cfg->is_rss) {
				/* last 4 byte */
				u16 rss_type = rxd_wb->type & 0xFU;

				if (rss_type && rss_type < 0x8U) {
					buff->is_hash_l4 = (rss_type == 0x4 ||
							    rss_type == 0x5);
					buff->rss_hash = rxd_wb->rss_hash;
				}
			}

			if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
				buff->len = rxd_wb->pkt_len %
					    AQ_CFG_RX_FRAME_MAX;
				buff->len = buff->len ?
					    buff->len : AQ_CFG_RX_FRAME_MAX;
				buff->next = 0U;
				buff->is_eop = 1U;
			} else {
				/* jumbo */
				buff->next = aq_ring_next_dx(ring,
							     ring->hw_head);
				++ring->stats.rx.jumbo_packets;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
				      (1U << HW_ATL_A0_ERR_INT));

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
		atomic_inc(&self->dpc);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);

	return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int i = 0U;

	hw_atl_rpfl2promiscuous_mode_en_set(self,
					    IS_FILTER_ENABLED(IFF_PROMISC));
	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

	for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled &&
					    (i <= cfg->mc_list_count)) ? 1U : 0U,
					   i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_HW_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
							l,
							HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
							h,
							HW_ATL_A0_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled),
					   HW_ATL_A0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

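/* Interrupt moderation. An explicitly configured ITR value is halved,
 * clamped and applied directly; in auto mode the Rx timer is taken from
 * hw_timers_tbl_, indexed by the current link speed, and the result is
 * written to every ring's throttle register.
 */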
static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_rx;

	if (self->aq_nic_cfg->itr) {
		if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
			u32 itr_ = (self->aq_nic_cfg->itr >> 1);

			itr_ = min(AQ_CFG_IRQ_MASK, itr_);

			itr_rx = 0x80000000U | (itr_ << 0x10);
		} else {
			u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

			if (n < self->aq_link_status.mbps) {
				itr_rx = 0U;
			} else {
				static unsigned int hw_timers_tbl_[] = {
					0x01CU, /* 10Gbit */
					0x039U, /* 5Gbit */
					0x039U, /* 5Gbit 5GS */
					0x073U, /* 2.5Gbit */
					0x120U, /* 1Gbit */
					0x1FFU, /* 100Mbit */
				};

				unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

				itr_rx = 0x80000000U |
					 (hw_timers_tbl_[speed_index] << 0x10U);
			}

			aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
			aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
		}
	} else {
		itr_rx = 0U;
	}

	for (i = HW_ATL_A0_RINGS_MAX; i--;)
		hw_atl_reg_irq_thr_set(self, itr_rx, i);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
				    struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	if (!data->is_ipv6) {
		hw_atl_rpfl3l4_cmd_clear(self, location);
		hw_atl_rpf_l4_spd_set(self, 0U, location);
		hw_atl_rpf_l4_dpd_set(self, 0U, location);
		hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
	} else {
		int i;

		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			hw_atl_rpfl3l4_cmd_clear(self, location + i);
			hw_atl_rpf_l4_spd_set(self, 0U, location + i);
			hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
		}
		hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
	}

	return aq_hw_err_from_flags(self);
}

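/* Program an L3/L4 flow filter. The slot is always cleared first; the
 * address match is programmed only when data->cmd requests it, while the L4
 * port and command registers are written unconditionally.
 */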
static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
				  struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	hw_atl_a0_hw_fl3l4_clear(self, data);

	if (data->cmd) {
		if (!data->is_ipv6) {
			hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
							  location,
							  data->ip_dst[0]);
			hw_atl_rpfl3l4_ipv4_src_addr_set(self,
							 location,
							 data->ip_src[0]);
		} else {
			hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
							  location,
							  data->ip_dst);
			hw_atl_rpfl3l4_ipv6_src_addr_set(self,
							 location,
							 data->ip_src);
		}
	}
	hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
	hw_atl_rpf_l4_spd_set(self, data->p_src, location);
	hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

	return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_a0 = {
	.hw_soft_reset        = hw_atl_utils_soft_reset,
	.hw_prepare           = hw_atl_utils_initfw,
	.hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
	.hw_init              = hw_atl_a0_hw_init,
	.hw_reset             = hw_atl_a0_hw_reset,
	.hw_start             = hw_atl_a0_hw_start,
	.hw_ring_tx_start     = hw_atl_a0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_a0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_a0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_a0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_a0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_a0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_a0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_a0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_a0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_a0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_a0_hw_irq_disable,
	.hw_irq_read             = hw_atl_a0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_a0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_a0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_a0_hw_packet_filter_set,
	.hw_filter_l3l4_set          = hw_atl_a0_hw_fl3l4_set,
	.hw_multicast_list_set       = hw_atl_a0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_a0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
};