// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_reg.h"

#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
/* little endian or big endian.
 * ctrl means packet description, data means skb packet data
 */
#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
#define HBG_PCU_FRAME_LEN_PLUS 4

static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
}

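/* Request a hardware event and wait for completion: the event type is
 * written to HBG_REG_EVENT_REQ_ADDR, then the registers are polled until
 * the specs become valid again and the request field reads back as zero.
 */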
int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}

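/* Read the device specifications (MAC id, PHY address, MTU limits, FIFO
 * sizes, MAC address, ...) that the hardware exposes through registers,
 * and derive the maximum frame length and RX buffer size from them.
 */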
static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}

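/* TX and RX interrupts have dedicated indirect status/mask/clear registers,
 * while all other interrupt sources share the common CF_INTRPT registers.
 * The helpers below merge or dispatch between the two register sets based
 * on the HBG_INT_MSK_TX_B/HBG_INT_MSK_RX_B bits of the mask.
 */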
u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
{
	u32 status;

	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);

	hbg_field_modify(status, HBG_INT_MSK_TX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
	hbg_field_modify(status, HBG_INT_MSK_RX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));

	return status;
}

void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);

	return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
}

bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);

	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
}

void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
{
	u32 value;

	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);

	value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
	if (enable)
		value |= mask;
	else
		value &= ~mask;

	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}

void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
}

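/* The PCU and the MAC each hold their own max frame length; both are
 * refreshed on every MTU change. The PCU value is clamped to at least
 * ETH_DATA_LEN and rounded up to a multiple of HBG_PCU_FRAME_LEN_PLUS.
 */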
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_len)
{
	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);

	/* lower two bits of value must be set to 0 */
	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);

	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
}

static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}

void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
	hbg_hw_set_pcu_max_frame_len(priv, mtu);
	hbg_hw_set_mac_max_frame_len(priv, mtu);
}

void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}

u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
{
	if (dir & HBG_DIR_TX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);

	if (dir & HBG_DIR_RX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);

	return 0;
}

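/* A TX descriptor is handed to hardware as four consecutive 32-bit register
 * writes (word0..word3 of struct hbg_tx_desc).
 */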
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}

void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}

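/* Program the negotiated link speed and duplex into the port mode and
 * duplex type registers.
 */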
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);
}

static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);

	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	/* assumed values: HBG_STATUS_ENABLE for the address mode bit,
	 * HBG_RX_SKIP2 for the second skip size
	 */
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
			   HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
			   HBG_RX_SKIP2);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);

	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}

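/* One-time hardware setup: read the device specs, select the bus endianness
 * for packet descriptions and data, allow mode changes, and program the RX
 * and TX control registers.
 */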
int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);
	return 0;
}