// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 */

#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
                              u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_csr_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_ring_if_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
                                   u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_diag_csr_addr + offset;

        iowrite32(val, addr);
}

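/* The MAC and PCS register banks are not memory mapped directly; they are
 * reached through a four-register indirect window (address, data, command,
 * command-done). The helpers below latch the target address and data, issue
 * the read/write command, then poll the command-done register until the
 * access completes or the retry budget runs out.
 */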
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
                                   void __iomem *cmd, void __iomem *cmd_done,
                                   u32 wr_addr, u32 wr_data)
{
        u32 done;
        u8 wait = 10;

        iowrite32(wr_addr, addr);
        iowrite32(wr_data, wr);
        iowrite32(XGENE_ENET_WR_CMD, cmd);

        /* wait for write command to complete */
        while (!(done = ioread32(cmd_done)) && wait--)
                udelay(1);

        if (!done)
                return false;

        iowrite32(0, cmd);

        return true;
}

static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
                              u32 wr_addr, u32 wr_data)
{
        void __iomem *addr, *wr, *cmd, *cmd_done;

        addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
        wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
        cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
        cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

        if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
                netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
                           wr_addr);
}

static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 val)
{
        void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
                              u32 offset, u32 *val)
{
        void __iomem *addr = pdata->eth_csr_addr + offset;

        *val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
                                   u32 offset, u32 *val)
{
        void __iomem *addr = pdata->eth_diag_csr_addr + offset;

        *val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
                                   void __iomem *cmd, void __iomem *cmd_done,
                                   u32 rd_addr, u32 *rd_data)
{
        u32 done;
        u8 wait = 10;

        iowrite32(rd_addr, addr);
        iowrite32(XGENE_ENET_RD_CMD, cmd);

        /* wait for read command to complete */
        while (!(done = ioread32(cmd_done)) && wait--)
                udelay(1);

        if (!done)
                return false;

        *rd_data = ioread32(rd);
        iowrite32(0, cmd);

        return true;
}

static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
                              u32 rd_addr, u32 *rd_data)
{
        void __iomem *addr, *rd, *cmd, *cmd_done;
        bool success;

        addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
        rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
        cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
        cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

        success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
        if (!success)
                netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
                           rd_addr);

        return success;
}

static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 *val)
{
        void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

        *val = ioread32(addr);
}

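/* Bring the Ethernet block RAMs out of shutdown and poll the memory-ready
 * register until every bank reports ready (0xffffffff) or the retry budget
 * is exhausted.
 */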
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
        struct net_device *ndev = pdata->ndev;
        u32 data;
        u8 wait = 10;

        xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
        do {
                usleep_range(100, 110);
                xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
        } while ((data != 0xffffffff) && wait--);

        if (data != 0xffffffff) {
                netdev_err(ndev, "Failed to release memory from shutdown\n");
                return -ENODEV;
        }

        return 0;
}

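/* RX (ICM) and TX (ECM) drop counters are packed into one CSR; the trailing
 * read of XGENET_ECM_CONFIG0_REG_0 is the workaround for the erratum noted
 * inline (the drop counter is not clear-on-read).
 */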
static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
                                     u32 *rx, u32 *tx)
{
        u32 count;

        xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
        *rx = ICM_DROP_COUNT(count);
        *tx = ECM_DROP_COUNT(count);
        /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
        xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
}

static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
{
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}

static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
{
        u32 data;

        if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
                return;

        xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
        xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
}

static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
        u32 addr0, addr1;
        u8 *dev_addr = pdata->ndev->dev_addr;

        addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
                (dev_addr[1] << 8) | dev_addr[0];
        addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

        xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
        xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}

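/* TSO MSS values are stored two per 32-bit CSR (TSO_MSS0 in the low half,
 * TSO_MSS1 in the high half); indices 0-1 live in XG_TSIF_MSS_REG0_ADDR and
 * higher indices in the following register, so the unrelated half is
 * preserved with a read-modify-write.
 */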
static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
                                u16 mss, u8 index)
{
        u8 offset;
        u32 data;

        offset = (index < 2) ? 0 : 4;
        xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);

        if (!(index & 0x1))
                data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
                        SET_VAL(TSO_MSS0, mss);
        else
                data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);

        xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}

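/* The max-frame register takes the byte length in the low halfword and the
 * same length expressed in 32-bit words ((size + 2) >> 2) in the high
 * halfword.
 */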
static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
        xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
                          ((((size + 2) >> 2) << 16) | size));
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);

        return data;
}

static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
                                        bool enable)
{
        u32 data;

        xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);

        if (enable)
                data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
        else
                data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

        xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);

        if (enable)
                data |= HSTTCTLEN;
        else
                data &= ~HSTTCTLEN;

        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

        pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);

        if (enable)
                data |= HSTRCTLEN;
        else
                data &= ~HSTRCTLEN;

        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
}

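/* One-time MAC bring-up: reset the MAC, program the station address, apply
 * the RSIF FIFO-threshold errata workarounds, un-gate the RX path, resume
 * TX, and set up hardware pause-frame generation before applying the
 * requested flow-control state.
 */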
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_xgmac_reset(pdata);

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
        data |= HSTPPEN;
        data &= ~HSTLENCHK;
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

        xgene_xgmac_set_mac_addr(pdata);

        xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
        data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
        /* Errata 10GE_1 - FIFO threshold default value incorrect */
        RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
        xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);

        /* Errata 10GE_1 - FIFO threshold default value incorrect */
        xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
        RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
        xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);

        xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
        data |= BIT(12);
        xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
        xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
        xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
        xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);

        /* Configure HW pause frame generation */
        xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
        data = (DEF_QUANTA << 16) | (data & 0xFFFF);
        xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);

        if (pdata->enet_id != XGENE_ENET1) {
                xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
                data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
                xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
        }

        data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
        xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);

        xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
        xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
}

static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}

static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}

static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}

static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
        xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;

        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;

        if (dev->of_node) {
                clk_prepare_enable(pdata->clk);
                udelay(5);
                clk_disable_unprepare(pdata->clk);
                udelay(5);
                clk_prepare_enable(pdata->clk);
                udelay(5);
        } else {
#ifdef CONFIG_ACPI
                acpi_status status;

                status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
                                              "_RST", NULL, NULL);
                if (ACPI_FAILURE(status)) {
                        acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
                                             "_INI", NULL, NULL);
                }
#endif
        }

        xgene_enet_ecc_init(pdata);
        xgene_enet_config_ring_if_assoc(pdata);

        return 0;
}

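/* Route received traffic around the classifier (CLE): enable bypass mode
 * and point the bypass path at the given destination ring and free pools.
 */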
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
                                    u32 dst_ring_num, u16 bufpool_id,
                                    u16 nxtbufpool_id)
{
        u32 cb, fpsel, nxtfpsel;

        xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
        cb |= CFG_CLE_BYPASS_EN0;
        CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
        xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);

        fpsel = xgene_enet_get_fpsel(bufpool_id);
        nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
        xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
        CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
        CFG_CLE_FPSEL0_SET(&cb, fpsel);
        CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
        xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
        pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;

        if (dev->of_node) {
                if (!IS_ERR(pdata->clk))
                        clk_disable_unprepare(pdata->clk);
        }
}

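/* Reset the QMI state of a single ring; free pools and work queues use
 * different reset registers and bit encodings.
 */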
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
                             struct xgene_enet_desc_ring *ring)
{
        u32 addr, data;

        if (xgene_enet_is_bufpool(ring->id)) {
                addr = ENET_CFGSSQMIFPRESET_ADDR;
                data = BIT(xgene_enet_get_fpsel(ring->id));
        } else {
                addr = ENET_CFGSSQMIWQRESET_ADDR;
                data = BIT(xgene_enet_ring_bufnum(ring->id));
        }

        xgene_enet_wr_ring_if(pdata, addr, data);
}

static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;

        pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
        if (IS_ERR(pdata->sfp_rdy))
                pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);

        if (IS_ERR(pdata->sfp_rdy))
                return -ENODEV;

        return 0;
}

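/* Periodic link poll, run as delayed work. A MAC link-up indication can be
 * vetoed by the SFP "rxlos"/"sfp" GPIO when one is present; when the link
 * is down the PCS is also reset, presumably so it can re-acquire the link
 * once the signal returns.
 */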
static void xgene_enet_link_state(struct work_struct *work)
{
        struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
                                         struct xgene_enet_pdata, link_work);
        struct net_device *ndev = pdata->ndev;
        u32 link_status, poll_interval;

        link_status = xgene_enet_link_status(pdata);
        if (pdata->sfp_gpio_en && link_status &&
            (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) &&
            !gpiod_get_value(pdata->sfp_rdy))
                link_status = 0;

        if (link_status) {
                if (!netif_carrier_ok(ndev)) {
                        netif_carrier_on(ndev);
                        xgene_xgmac_rx_enable(pdata);
                        xgene_xgmac_tx_enable(pdata);
                        netdev_info(ndev, "Link is Up - 10Gbps\n");
                }
                poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(ndev)) {
                        xgene_xgmac_rx_disable(pdata);
                        xgene_xgmac_tx_disable(pdata);
                        netif_carrier_off(ndev);
                        netdev_info(ndev, "Link is Down\n");
                }
                poll_interval = PHY_POLL_LINK_OFF;

                xgene_pcs_reset(pdata);
        }

        schedule_delayed_work(&pdata->link_work, poll_interval);
}

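/* Callback tables picked up by the core driver for the 10GbE (XFI/SFP+)
 * ports.
 */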
const struct xgene_mac_ops xgene_xgmac_ops = {
        .init = xgene_xgmac_init,
        .reset = xgene_xgmac_reset,
        .rx_enable = xgene_xgmac_rx_enable,
        .tx_enable = xgene_xgmac_tx_enable,
        .rx_disable = xgene_xgmac_rx_disable,
        .tx_disable = xgene_xgmac_tx_disable,
        .set_mac_addr = xgene_xgmac_set_mac_addr,
        .set_framesize = xgene_xgmac_set_frame_size,
        .set_mss = xgene_xgmac_set_mss,
        .get_drop_cnt = xgene_xgmac_get_drop_cnt,
        .link_state = xgene_enet_link_state,
        .enable_tx_pause = xgene_xgmac_enable_tx_pause,
        .flowctl_rx = xgene_xgmac_flowctl_rx,
        .flowctl_tx = xgene_xgmac_flowctl_tx
};

const struct xgene_port_ops xgene_xgport_ops = {
        .reset = xgene_enet_reset,
        .clear = xgene_enet_clear,
        .cle_bypass = xgene_enet_xgcle_bypass,
        .shutdown = xgene_enet_shutdown,
};