// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
13 static void xgene_enet_ring_init(struct xgene_enet_desc_ring
*ring
)
15 u32
*ring_cfg
= ring
->state
;
17 enum xgene_enet_ring_cfgsize cfgsize
= ring
->cfgsize
;
19 ring_cfg
[4] |= (1 << SELTHRSH_POS
) &
20 CREATE_MASK(SELTHRSH_POS
, SELTHRSH_LEN
);
21 ring_cfg
[3] |= ACCEPTLERR
;
22 ring_cfg
[2] |= QCOHERENT
;
25 ring_cfg
[2] |= (addr
<< RINGADDRL_POS
) &
26 CREATE_MASK_ULL(RINGADDRL_POS
, RINGADDRL_LEN
);
27 addr
>>= RINGADDRL_LEN
;
28 ring_cfg
[3] |= addr
& CREATE_MASK_ULL(RINGADDRH_POS
, RINGADDRH_LEN
);
29 ring_cfg
[3] |= ((u32
)cfgsize
<< RINGSIZE_POS
) &
30 CREATE_MASK(RINGSIZE_POS
, RINGSIZE_LEN
);
33 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring
*ring
)
35 u32
*ring_cfg
= ring
->state
;
39 is_bufpool
= xgene_enet_is_bufpool(ring
->id
);
40 val
= (is_bufpool
) ? RING_BUFPOOL
: RING_REGULAR
;
41 ring_cfg
[4] |= (val
<< RINGTYPE_POS
) &
42 CREATE_MASK(RINGTYPE_POS
, RINGTYPE_LEN
);
45 ring_cfg
[3] |= (BUFPOOL_MODE
<< RINGMODE_POS
) &
46 CREATE_MASK(RINGMODE_POS
, RINGMODE_LEN
);
50 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring
*ring
)
52 u32
*ring_cfg
= ring
->state
;
54 ring_cfg
[3] |= RECOMBBUF
;
55 ring_cfg
[3] |= (0xf << RECOMTIMEOUTL_POS
) &
56 CREATE_MASK(RECOMTIMEOUTL_POS
, RECOMTIMEOUTL_LEN
);
57 ring_cfg
[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS
, RECOMTIMEOUTH_LEN
);
60 static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring
*ring
,
63 struct xgene_enet_pdata
*pdata
= netdev_priv(ring
->ndev
);
65 iowrite32(data
, pdata
->ring_csr_addr
+ offset
);
68 static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring
*ring
,
69 u32 offset
, u32
*data
)
71 struct xgene_enet_pdata
*pdata
= netdev_priv(ring
->ndev
);
73 *data
= ioread32(pdata
->ring_csr_addr
+ offset
);
76 static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring
*ring
)
78 struct xgene_enet_pdata
*pdata
= netdev_priv(ring
->ndev
);
81 xgene_enet_ring_wr32(ring
, CSR_RING_CONFIG
, ring
->num
);
82 for (i
= 0; i
< pdata
->ring_ops
->num_ring_config
; i
++) {
83 xgene_enet_ring_wr32(ring
, CSR_RING_WR_BASE
+ (i
* 4),
88 static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring
*ring
)
90 memset(ring
->state
, 0, sizeof(ring
->state
));
91 xgene_enet_write_ring_state(ring
);
94 static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring
*ring
)
96 xgene_enet_ring_set_type(ring
);
98 if (xgene_enet_ring_owner(ring
->id
) == RING_OWNER_ETH0
||
99 xgene_enet_ring_owner(ring
->id
) == RING_OWNER_ETH1
)
100 xgene_enet_ring_set_recombbuf(ring
);
102 xgene_enet_ring_init(ring
);
103 xgene_enet_write_ring_state(ring
);
106 static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring
*ring
)
108 u32 ring_id_val
, ring_id_buf
;
111 is_bufpool
= xgene_enet_is_bufpool(ring
->id
);
113 ring_id_val
= ring
->id
& GENMASK(9, 0);
114 ring_id_val
|= OVERWRITE
;
116 ring_id_buf
= (ring
->num
<< 9) & GENMASK(18, 9);
117 ring_id_buf
|= PREFETCH_BUF_EN
;
119 ring_id_buf
|= IS_BUFFER_POOL
;
121 xgene_enet_ring_wr32(ring
, CSR_RING_ID
, ring_id_val
);
122 xgene_enet_ring_wr32(ring
, CSR_RING_ID_BUF
, ring_id_buf
);
125 static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring
*ring
)
129 ring_id
= ring
->id
| OVERWRITE
;
130 xgene_enet_ring_wr32(ring
, CSR_RING_ID
, ring_id
);
131 xgene_enet_ring_wr32(ring
, CSR_RING_ID_BUF
, 0);
134 static struct xgene_enet_desc_ring
*xgene_enet_setup_ring(
135 struct xgene_enet_desc_ring
*ring
)
137 u32 size
= ring
->size
;
141 xgene_enet_clr_ring_state(ring
);
142 xgene_enet_set_ring_state(ring
);
143 xgene_enet_set_ring_id(ring
);
145 ring
->slots
= xgene_enet_get_numslots(ring
->id
, size
);
147 is_bufpool
= xgene_enet_is_bufpool(ring
->id
);
148 if (is_bufpool
|| xgene_enet_ring_owner(ring
->id
) != RING_OWNER_CPU
)
151 for (i
= 0; i
< ring
->slots
; i
++)
152 xgene_enet_mark_desc_slot_empty(&ring
->raw_desc
[i
]);
154 xgene_enet_ring_rd32(ring
, CSR_RING_NE_INT_MODE
, &data
);
155 data
|= BIT(31 - xgene_enet_ring_bufnum(ring
->id
));
156 xgene_enet_ring_wr32(ring
, CSR_RING_NE_INT_MODE
, data
);
161 static void xgene_enet_clear_ring(struct xgene_enet_desc_ring
*ring
)
166 is_bufpool
= xgene_enet_is_bufpool(ring
->id
);
167 if (is_bufpool
|| xgene_enet_ring_owner(ring
->id
) != RING_OWNER_CPU
)
170 xgene_enet_ring_rd32(ring
, CSR_RING_NE_INT_MODE
, &data
);
171 data
&= ~BIT(31 - xgene_enet_ring_bufnum(ring
->id
));
172 xgene_enet_ring_wr32(ring
, CSR_RING_NE_INT_MODE
, data
);
175 xgene_enet_clr_desc_ring_id(ring
);
176 xgene_enet_clr_ring_state(ring
);
179 static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring
*ring
, int count
)
181 iowrite32(count
, ring
->cmd
);
184 static u32
xgene_enet_ring_len(struct xgene_enet_desc_ring
*ring
)
186 u32 __iomem
*cmd_base
= ring
->cmd_base
;
187 u32 ring_state
, num_msgs
;
189 ring_state
= ioread32(&cmd_base
[1]);
190 num_msgs
= GET_VAL(NUMMSGSINQ
, ring_state
);
195 void xgene_enet_parse_error(struct xgene_enet_desc_ring
*ring
,
196 enum xgene_enet_err_code status
)
200 ring
->rx_crc_errors
++;
202 case INGRESS_CHECKSUM
:
203 case INGRESS_CHECKSUM_COMPUTE
:
206 case INGRESS_TRUNC_FRAME
:
207 ring
->rx_frame_errors
++;
209 case INGRESS_PKT_LEN
:
210 ring
->rx_length_errors
++;
212 case INGRESS_PKT_UNDER
:
213 ring
->rx_frame_errors
++;
215 case INGRESS_FIFO_OVERRUN
:
216 ring
->rx_fifo_errors
++;
223 static void xgene_enet_wr_csr(struct xgene_enet_pdata
*pdata
,
226 void __iomem
*addr
= pdata
->eth_csr_addr
+ offset
;
228 iowrite32(val
, addr
);
231 static void xgene_enet_wr_ring_if(struct xgene_enet_pdata
*pdata
,
234 void __iomem
*addr
= pdata
->eth_ring_if_addr
+ offset
;
236 iowrite32(val
, addr
);
239 static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata
*pdata
,
242 void __iomem
*addr
= pdata
->eth_diag_csr_addr
+ offset
;
244 iowrite32(val
, addr
);
247 static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata
*pdata
,
250 void __iomem
*addr
= pdata
->mcx_mac_csr_addr
+ offset
;
252 iowrite32(val
, addr
);
255 void xgene_enet_wr_mac(struct xgene_enet_pdata
*pdata
, u32 wr_addr
, u32 wr_data
)
257 void __iomem
*addr
, *wr
, *cmd
, *cmd_done
;
258 struct net_device
*ndev
= pdata
->ndev
;
262 if (pdata
->mdio_driver
&& ndev
->phydev
&&
263 phy_interface_mode_is_rgmii(pdata
->phy_mode
)) {
264 struct mii_bus
*bus
= ndev
->phydev
->mdio
.bus
;
266 return xgene_mdio_wr_mac(bus
->priv
, wr_addr
, wr_data
);
269 addr
= pdata
->mcx_mac_addr
+ MAC_ADDR_REG_OFFSET
;
270 wr
= pdata
->mcx_mac_addr
+ MAC_WRITE_REG_OFFSET
;
271 cmd
= pdata
->mcx_mac_addr
+ MAC_COMMAND_REG_OFFSET
;
272 cmd_done
= pdata
->mcx_mac_addr
+ MAC_COMMAND_DONE_REG_OFFSET
;
274 spin_lock(&pdata
->mac_lock
);
275 iowrite32(wr_addr
, addr
);
276 iowrite32(wr_data
, wr
);
277 iowrite32(XGENE_ENET_WR_CMD
, cmd
);
279 while (!(done
= ioread32(cmd_done
)) && wait
--)
283 netdev_err(ndev
, "mac write failed, addr: %04x data: %08x\n",
287 spin_unlock(&pdata
->mac_lock
);
290 static void xgene_enet_rd_csr(struct xgene_enet_pdata
*pdata
,
291 u32 offset
, u32
*val
)
293 void __iomem
*addr
= pdata
->eth_csr_addr
+ offset
;
295 *val
= ioread32(addr
);
298 static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata
*pdata
,
299 u32 offset
, u32
*val
)
301 void __iomem
*addr
= pdata
->eth_diag_csr_addr
+ offset
;
303 *val
= ioread32(addr
);
306 static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata
*pdata
,
307 u32 offset
, u32
*val
)
309 void __iomem
*addr
= pdata
->mcx_mac_csr_addr
+ offset
;
311 *val
= ioread32(addr
);
314 u32
xgene_enet_rd_mac(struct xgene_enet_pdata
*pdata
, u32 rd_addr
)
316 void __iomem
*addr
, *rd
, *cmd
, *cmd_done
;
317 struct net_device
*ndev
= pdata
->ndev
;
321 if (pdata
->mdio_driver
&& ndev
->phydev
&&
322 phy_interface_mode_is_rgmii(pdata
->phy_mode
)) {
323 struct mii_bus
*bus
= ndev
->phydev
->mdio
.bus
;
325 return xgene_mdio_rd_mac(bus
->priv
, rd_addr
);
328 addr
= pdata
->mcx_mac_addr
+ MAC_ADDR_REG_OFFSET
;
329 rd
= pdata
->mcx_mac_addr
+ MAC_READ_REG_OFFSET
;
330 cmd
= pdata
->mcx_mac_addr
+ MAC_COMMAND_REG_OFFSET
;
331 cmd_done
= pdata
->mcx_mac_addr
+ MAC_COMMAND_DONE_REG_OFFSET
;
333 spin_lock(&pdata
->mac_lock
);
334 iowrite32(rd_addr
, addr
);
335 iowrite32(XGENE_ENET_RD_CMD
, cmd
);
337 while (!(done
= ioread32(cmd_done
)) && wait
--)
341 netdev_err(ndev
, "mac read failed, addr: %04x\n", rd_addr
);
343 rd_data
= ioread32(rd
);
345 spin_unlock(&pdata
->mac_lock
);
350 u32
xgene_enet_rd_stat(struct xgene_enet_pdata
*pdata
, u32 rd_addr
)
352 void __iomem
*addr
, *rd
, *cmd
, *cmd_done
;
356 addr
= pdata
->mcx_stats_addr
+ STAT_ADDR_REG_OFFSET
;
357 rd
= pdata
->mcx_stats_addr
+ STAT_READ_REG_OFFSET
;
358 cmd
= pdata
->mcx_stats_addr
+ STAT_COMMAND_REG_OFFSET
;
359 cmd_done
= pdata
->mcx_stats_addr
+ STAT_COMMAND_DONE_REG_OFFSET
;
361 spin_lock(&pdata
->stats_lock
);
362 iowrite32(rd_addr
, addr
);
363 iowrite32(XGENE_ENET_RD_CMD
, cmd
);
365 while (!(done
= ioread32(cmd_done
)) && wait
--)
369 netdev_err(pdata
->ndev
, "mac stats read failed, addr: %04x\n",
372 rd_data
= ioread32(rd
);
374 spin_unlock(&pdata
->stats_lock
);
379 static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata
*pdata
)
381 const u8
*dev_addr
= pdata
->ndev
->dev_addr
;
384 addr0
= (dev_addr
[3] << 24) | (dev_addr
[2] << 16) |
385 (dev_addr
[1] << 8) | dev_addr
[0];
386 addr1
= (dev_addr
[5] << 24) | (dev_addr
[4] << 16);
388 xgene_enet_wr_mac(pdata
, STATION_ADDR0_ADDR
, addr0
);
389 xgene_enet_wr_mac(pdata
, STATION_ADDR1_ADDR
, addr1
);
392 static int xgene_enet_ecc_init(struct xgene_enet_pdata
*pdata
)
394 struct net_device
*ndev
= pdata
->ndev
;
398 xgene_enet_wr_diag_csr(pdata
, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR
, 0x0);
400 usleep_range(100, 110);
401 xgene_enet_rd_diag_csr(pdata
, ENET_BLOCK_MEM_RDY_ADDR
, &data
);
402 } while ((data
!= 0xffffffff) && wait
--);
404 if (data
!= 0xffffffff) {
405 netdev_err(ndev
, "Failed to release memory from shutdown\n");
412 static void xgene_gmac_reset(struct xgene_enet_pdata
*pdata
)
414 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, SOFT_RESET1
);
415 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, 0);
418 static void xgene_enet_configure_clock(struct xgene_enet_pdata
*pdata
)
420 struct device
*dev
= &pdata
->pdev
->dev
;
423 struct clk
*parent
= clk_get_parent(pdata
->clk
);
425 switch (pdata
->phy_speed
) {
427 clk_set_rate(parent
, 2500000);
430 clk_set_rate(parent
, 25000000);
433 clk_set_rate(parent
, 125000000);
439 switch (pdata
->phy_speed
) {
441 acpi_evaluate_object(ACPI_HANDLE(dev
),
445 acpi_evaluate_object(ACPI_HANDLE(dev
),
449 acpi_evaluate_object(ACPI_HANDLE(dev
),
457 static void xgene_gmac_set_speed(struct xgene_enet_pdata
*pdata
)
460 u32 intf_ctl
, rgmii
, value
;
462 xgene_enet_rd_mcx_csr(pdata
, ICM_CONFIG0_REG_0_ADDR
, &icm0
);
463 xgene_enet_rd_mcx_csr(pdata
, ICM_CONFIG2_REG_0_ADDR
, &icm2
);
464 mc2
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_2_ADDR
);
465 intf_ctl
= xgene_enet_rd_mac(pdata
, INTERFACE_CONTROL_ADDR
);
466 xgene_enet_rd_csr(pdata
, RGMII_REG_0_ADDR
, &rgmii
);
468 switch (pdata
->phy_speed
) {
470 ENET_INTERFACE_MODE2_SET(&mc2
, 1);
471 intf_ctl
&= ~(ENET_LHD_MODE
| ENET_GHD_MODE
);
472 CFG_MACMODE_SET(&icm0
, 0);
473 CFG_WAITASYNCRD_SET(&icm2
, 500);
474 rgmii
&= ~CFG_SPEED_1250
;
477 ENET_INTERFACE_MODE2_SET(&mc2
, 1);
478 intf_ctl
&= ~ENET_GHD_MODE
;
479 intf_ctl
|= ENET_LHD_MODE
;
480 CFG_MACMODE_SET(&icm0
, 1);
481 CFG_WAITASYNCRD_SET(&icm2
, 80);
482 rgmii
&= ~CFG_SPEED_1250
;
485 ENET_INTERFACE_MODE2_SET(&mc2
, 2);
486 intf_ctl
&= ~ENET_LHD_MODE
;
487 intf_ctl
|= ENET_GHD_MODE
;
488 CFG_MACMODE_SET(&icm0
, 2);
489 CFG_WAITASYNCRD_SET(&icm2
, 0);
490 CFG_TXCLK_MUXSEL0_SET(&rgmii
, pdata
->tx_delay
);
491 CFG_RXCLK_MUXSEL0_SET(&rgmii
, pdata
->rx_delay
);
492 rgmii
|= CFG_SPEED_1250
;
494 xgene_enet_rd_csr(pdata
, DEBUG_REG_ADDR
, &value
);
495 value
|= CFG_BYPASS_UNISEC_TX
| CFG_BYPASS_UNISEC_RX
;
496 xgene_enet_wr_csr(pdata
, DEBUG_REG_ADDR
, value
);
500 mc2
|= FULL_DUPLEX2
| PAD_CRC
| LENGTH_CHK
;
501 xgene_enet_wr_mac(pdata
, MAC_CONFIG_2_ADDR
, mc2
);
502 xgene_enet_wr_mac(pdata
, INTERFACE_CONTROL_ADDR
, intf_ctl
);
503 xgene_enet_wr_csr(pdata
, RGMII_REG_0_ADDR
, rgmii
);
504 xgene_enet_configure_clock(pdata
);
506 xgene_enet_wr_mcx_csr(pdata
, ICM_CONFIG0_REG_0_ADDR
, icm0
);
507 xgene_enet_wr_mcx_csr(pdata
, ICM_CONFIG2_REG_0_ADDR
, icm2
);
510 static void xgene_enet_set_frame_size(struct xgene_enet_pdata
*pdata
, int size
)
512 xgene_enet_wr_mac(pdata
, MAX_FRAME_LEN_ADDR
, size
);
515 static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata
*pdata
,
520 xgene_enet_rd_mcx_csr(pdata
, CSR_ECM_CFG_0_ADDR
, &data
);
523 data
|= MULTI_DPF_AUTOCTRL
| PAUSE_XON_EN
;
525 data
&= ~(MULTI_DPF_AUTOCTRL
| PAUSE_XON_EN
);
527 xgene_enet_wr_mcx_csr(pdata
, CSR_ECM_CFG_0_ADDR
, data
);
530 static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata
*pdata
, bool enable
)
534 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
541 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
);
543 pdata
->mac_ops
->enable_tx_pause(pdata
, enable
);
546 static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata
*pdata
, bool enable
)
550 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
557 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
);
560 static void xgene_gmac_init(struct xgene_enet_pdata
*pdata
)
564 if (!pdata
->mdio_driver
)
565 xgene_gmac_reset(pdata
);
567 xgene_gmac_set_speed(pdata
);
568 xgene_gmac_set_mac_addr(pdata
);
570 /* Adjust MDC clock frequency */
571 value
= xgene_enet_rd_mac(pdata
, MII_MGMT_CONFIG_ADDR
);
572 MGMT_CLOCK_SEL_SET(&value
, 7);
573 xgene_enet_wr_mac(pdata
, MII_MGMT_CONFIG_ADDR
, value
);
575 /* Enable drop if bufpool not available */
576 xgene_enet_rd_csr(pdata
, RSIF_CONFIG_REG_ADDR
, &value
);
577 value
|= CFG_RSIF_FPBUFF_TIMEOUT_EN
;
578 xgene_enet_wr_csr(pdata
, RSIF_CONFIG_REG_ADDR
, value
);
580 /* Rtype should be copied from FP */
581 xgene_enet_wr_csr(pdata
, RSIF_RAM_DBG_REG0_ADDR
, 0);
583 /* Configure HW pause frame generation */
584 xgene_enet_rd_mcx_csr(pdata
, CSR_MULTI_DPF0_ADDR
, &value
);
585 value
= (DEF_QUANTA
<< 16) | (value
& 0xFFFF);
586 xgene_enet_wr_mcx_csr(pdata
, CSR_MULTI_DPF0_ADDR
, value
);
588 xgene_enet_wr_csr(pdata
, RXBUF_PAUSE_THRESH
, DEF_PAUSE_THRES
);
589 xgene_enet_wr_csr(pdata
, RXBUF_PAUSE_OFF_THRESH
, DEF_PAUSE_OFF_THRES
);
591 xgene_gmac_flowctl_tx(pdata
, pdata
->tx_pause
);
592 xgene_gmac_flowctl_rx(pdata
, pdata
->rx_pause
);
594 /* Rx-Tx traffic resume */
595 xgene_enet_wr_csr(pdata
, CFG_LINK_AGGR_RESUME_0_ADDR
, TX_PORT0
);
597 xgene_enet_rd_mcx_csr(pdata
, RX_DV_GATE_REG_0_ADDR
, &value
);
598 value
&= ~TX_DV_GATE_EN0
;
599 value
&= ~RX_DV_GATE_EN0
;
601 xgene_enet_wr_mcx_csr(pdata
, RX_DV_GATE_REG_0_ADDR
, value
);
603 xgene_enet_wr_csr(pdata
, CFG_BYPASS_ADDR
, RESUME_TX
);
606 static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata
*pdata
,
611 xgene_enet_rd_mcx_csr(pdata
, ICM_ECM_DROP_COUNT_REG0_ADDR
, &count
);
612 *rx
= ICM_DROP_COUNT(count
);
613 *tx
= ECM_DROP_COUNT(count
);
614 /* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
615 xgene_enet_rd_mcx_csr(pdata
, ECM_CONFIG0_REG_0_ADDR
, &count
);
618 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata
*pdata
)
620 u32 val
= 0xffffffff;
622 xgene_enet_wr_ring_if(pdata
, ENET_CFGSSQMIWQASSOC_ADDR
, val
);
623 xgene_enet_wr_ring_if(pdata
, ENET_CFGSSQMIFPQASSOC_ADDR
, val
);
624 xgene_enet_wr_ring_if(pdata
, ENET_CFGSSQMIQMLITEWQASSOC_ADDR
, val
);
625 xgene_enet_wr_ring_if(pdata
, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR
, val
);
628 static void xgene_enet_cle_bypass(struct xgene_enet_pdata
*pdata
,
629 u32 dst_ring_num
, u16 bufpool_id
,
635 fpsel
= xgene_enet_get_fpsel(bufpool_id
);
636 nxtfpsel
= xgene_enet_get_fpsel(nxtbufpool_id
);
638 xgene_enet_rd_csr(pdata
, CLE_BYPASS_REG0_0_ADDR
, &cb
);
639 cb
|= CFG_CLE_BYPASS_EN0
;
640 CFG_CLE_IP_PROTOCOL0_SET(&cb
, 3);
641 CFG_CLE_IP_HDR_LEN_SET(&cb
, 0);
642 xgene_enet_wr_csr(pdata
, CLE_BYPASS_REG0_0_ADDR
, cb
);
644 xgene_enet_rd_csr(pdata
, CLE_BYPASS_REG1_0_ADDR
, &cb
);
645 CFG_CLE_DSTQID0_SET(&cb
, dst_ring_num
);
646 CFG_CLE_FPSEL0_SET(&cb
, fpsel
);
647 CFG_CLE_NXTFPSEL0_SET(&cb
, nxtfpsel
);
648 xgene_enet_wr_csr(pdata
, CLE_BYPASS_REG1_0_ADDR
, cb
);
651 static void xgene_gmac_rx_enable(struct xgene_enet_pdata
*pdata
)
655 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
656 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
| RX_EN
);
659 static void xgene_gmac_tx_enable(struct xgene_enet_pdata
*pdata
)
663 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
664 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
| TX_EN
);
667 static void xgene_gmac_rx_disable(struct xgene_enet_pdata
*pdata
)
671 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
672 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
& ~RX_EN
);
675 static void xgene_gmac_tx_disable(struct xgene_enet_pdata
*pdata
)
679 data
= xgene_enet_rd_mac(pdata
, MAC_CONFIG_1_ADDR
);
680 xgene_enet_wr_mac(pdata
, MAC_CONFIG_1_ADDR
, data
& ~TX_EN
);
683 bool xgene_ring_mgr_init(struct xgene_enet_pdata
*p
)
685 if (!ioread32(p
->ring_csr_addr
+ CLKEN_ADDR
))
688 if (ioread32(p
->ring_csr_addr
+ SRST_ADDR
))
694 static int xgene_enet_reset(struct xgene_enet_pdata
*pdata
)
696 struct device
*dev
= &pdata
->pdev
->dev
;
698 if (!xgene_ring_mgr_init(pdata
))
701 if (pdata
->mdio_driver
) {
702 xgene_enet_config_ring_if_assoc(pdata
);
707 clk_prepare_enable(pdata
->clk
);
709 clk_disable_unprepare(pdata
->clk
);
711 clk_prepare_enable(pdata
->clk
);
717 status
= acpi_evaluate_object(ACPI_HANDLE(&pdata
->pdev
->dev
),
719 if (ACPI_FAILURE(status
)) {
720 acpi_evaluate_object(ACPI_HANDLE(&pdata
->pdev
->dev
),
726 xgene_enet_ecc_init(pdata
);
727 xgene_enet_config_ring_if_assoc(pdata
);
732 static void xgene_enet_clear(struct xgene_enet_pdata
*pdata
,
733 struct xgene_enet_desc_ring
*ring
)
737 if (xgene_enet_is_bufpool(ring
->id
)) {
738 addr
= ENET_CFGSSQMIFPRESET_ADDR
;
739 data
= BIT(xgene_enet_get_fpsel(ring
->id
));
741 addr
= ENET_CFGSSQMIWQRESET_ADDR
;
742 data
= BIT(xgene_enet_ring_bufnum(ring
->id
));
745 xgene_enet_wr_ring_if(pdata
, addr
, data
);
748 static void xgene_gport_shutdown(struct xgene_enet_pdata
*pdata
)
750 struct device
*dev
= &pdata
->pdev
->dev
;
753 if (!IS_ERR(pdata
->clk
))
754 clk_disable_unprepare(pdata
->clk
);
758 static u32
xgene_enet_flowctrl_cfg(struct net_device
*ndev
)
760 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
761 struct phy_device
*phydev
= ndev
->phydev
;
762 u16 lcladv
, rmtadv
= 0;
763 u32 rx_pause
, tx_pause
;
766 if (!phydev
->duplex
|| !pdata
->pause_autoneg
)
770 flowctl
|= FLOW_CTRL_TX
;
773 flowctl
|= FLOW_CTRL_RX
;
775 lcladv
= mii_advertise_flowctrl(flowctl
);
778 rmtadv
= LPA_PAUSE_CAP
;
780 if (phydev
->asym_pause
)
781 rmtadv
|= LPA_PAUSE_ASYM
;
783 flowctl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
784 tx_pause
= !!(flowctl
& FLOW_CTRL_TX
);
785 rx_pause
= !!(flowctl
& FLOW_CTRL_RX
);
787 if (tx_pause
!= pdata
->tx_pause
) {
788 pdata
->tx_pause
= tx_pause
;
789 pdata
->mac_ops
->flowctl_tx(pdata
, pdata
->tx_pause
);
792 if (rx_pause
!= pdata
->rx_pause
) {
793 pdata
->rx_pause
= rx_pause
;
794 pdata
->mac_ops
->flowctl_rx(pdata
, pdata
->rx_pause
);
800 static void xgene_enet_adjust_link(struct net_device
*ndev
)
802 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
803 const struct xgene_mac_ops
*mac_ops
= pdata
->mac_ops
;
804 struct phy_device
*phydev
= ndev
->phydev
;
807 if (pdata
->phy_speed
!= phydev
->speed
) {
808 pdata
->phy_speed
= phydev
->speed
;
809 mac_ops
->set_speed(pdata
);
810 mac_ops
->rx_enable(pdata
);
811 mac_ops
->tx_enable(pdata
);
812 phy_print_status(phydev
);
815 xgene_enet_flowctrl_cfg(ndev
);
817 mac_ops
->rx_disable(pdata
);
818 mac_ops
->tx_disable(pdata
);
819 pdata
->phy_speed
= SPEED_UNKNOWN
;
820 phy_print_status(phydev
);
825 static struct acpi_device
*acpi_phy_find_device(struct device
*dev
)
827 struct fwnode_reference_args args
;
828 struct fwnode_handle
*fw_node
;
831 fw_node
= acpi_fwnode_handle(ACPI_COMPANION(dev
));
832 status
= acpi_node_get_property_reference(fw_node
, "phy-handle", 0,
834 if (ACPI_FAILURE(status
) || !is_acpi_device_node(args
.fwnode
)) {
835 dev_dbg(dev
, "No matching phy in ACPI table\n");
839 return to_acpi_device_node(args
.fwnode
);
843 int xgene_enet_phy_connect(struct net_device
*ndev
)
845 struct xgene_enet_pdata
*pdata
= netdev_priv(ndev
);
846 struct device_node
*np
;
847 struct phy_device
*phy_dev
;
848 struct device
*dev
= &pdata
->pdev
->dev
;
852 for (i
= 0 ; i
< 2; i
++) {
853 np
= of_parse_phandle(dev
->of_node
, "phy-handle", i
);
854 phy_dev
= of_phy_connect(ndev
, np
,
855 &xgene_enet_adjust_link
,
863 netdev_err(ndev
, "Could not connect to PHY\n");
868 struct acpi_device
*adev
= acpi_phy_find_device(dev
);
870 phy_dev
= adev
->driver_data
;
875 phy_connect_direct(ndev
, phy_dev
, &xgene_enet_adjust_link
,
877 netdev_err(ndev
, "Could not connect to PHY\n");
885 pdata
->phy_speed
= SPEED_UNKNOWN
;
886 phy_remove_link_mode(phy_dev
, ETHTOOL_LINK_MODE_10baseT_Half_BIT
);
887 phy_remove_link_mode(phy_dev
, ETHTOOL_LINK_MODE_100baseT_Half_BIT
);
888 phy_remove_link_mode(phy_dev
, ETHTOOL_LINK_MODE_1000baseT_Half_BIT
);
889 phy_support_asym_pause(phy_dev
);
894 static int xgene_mdiobus_register(struct xgene_enet_pdata
*pdata
,
895 struct mii_bus
*mdio
)
897 struct device
*dev
= &pdata
->pdev
->dev
;
898 struct net_device
*ndev
= pdata
->ndev
;
899 struct phy_device
*phy
;
900 struct device_node
*child_np
;
901 struct device_node
*mdio_np
= NULL
;
906 for_each_child_of_node(dev
->of_node
, child_np
) {
907 if (of_device_is_compatible(child_np
,
915 netdev_dbg(ndev
, "No mdio node in the dts\n");
919 return of_mdiobus_register(mdio
, mdio_np
);
922 /* Mask out all PHYs from auto probing. */
925 /* Register the MDIO bus */
926 ret
= mdiobus_register(mdio
);
930 ret
= device_property_read_u32(dev
, "phy-channel", &phy_addr
);
932 ret
= device_property_read_u32(dev
, "phy-addr", &phy_addr
);
936 phy
= xgene_enet_phy_register(mdio
, phy_addr
);
943 int xgene_enet_mdio_config(struct xgene_enet_pdata
*pdata
)
945 struct net_device
*ndev
= pdata
->ndev
;
946 struct mii_bus
*mdio_bus
;
949 mdio_bus
= mdiobus_alloc();
953 mdio_bus
->name
= "APM X-Gene MDIO bus";
954 mdio_bus
->read
= xgene_mdio_rgmii_read
;
955 mdio_bus
->write
= xgene_mdio_rgmii_write
;
956 snprintf(mdio_bus
->id
, MII_BUS_ID_SIZE
, "%s-%s", "xgene-mii",
959 mdio_bus
->priv
= (void __force
*)pdata
->mcx_mac_addr
;
960 mdio_bus
->parent
= &pdata
->pdev
->dev
;
962 ret
= xgene_mdiobus_register(pdata
, mdio_bus
);
964 netdev_err(ndev
, "Failed to register MDIO bus\n");
965 mdiobus_free(mdio_bus
);
968 pdata
->mdio_bus
= mdio_bus
;
970 ret
= xgene_enet_phy_connect(ndev
);
972 xgene_enet_mdio_remove(pdata
);
977 void xgene_enet_phy_disconnect(struct xgene_enet_pdata
*pdata
)
979 struct net_device
*ndev
= pdata
->ndev
;
982 phy_disconnect(ndev
->phydev
);
985 void xgene_enet_mdio_remove(struct xgene_enet_pdata
*pdata
)
987 struct net_device
*ndev
= pdata
->ndev
;
990 phy_disconnect(ndev
->phydev
);
992 mdiobus_unregister(pdata
->mdio_bus
);
993 mdiobus_free(pdata
->mdio_bus
);
994 pdata
->mdio_bus
= NULL
;
997 const struct xgene_mac_ops xgene_gmac_ops
= {
998 .init
= xgene_gmac_init
,
999 .reset
= xgene_gmac_reset
,
1000 .rx_enable
= xgene_gmac_rx_enable
,
1001 .tx_enable
= xgene_gmac_tx_enable
,
1002 .rx_disable
= xgene_gmac_rx_disable
,
1003 .tx_disable
= xgene_gmac_tx_disable
,
1004 .get_drop_cnt
= xgene_gmac_get_drop_cnt
,
1005 .set_speed
= xgene_gmac_set_speed
,
1006 .set_mac_addr
= xgene_gmac_set_mac_addr
,
1007 .set_framesize
= xgene_enet_set_frame_size
,
1008 .enable_tx_pause
= xgene_gmac_enable_tx_pause
,
1009 .flowctl_tx
= xgene_gmac_flowctl_tx
,
1010 .flowctl_rx
= xgene_gmac_flowctl_rx
,
1013 const struct xgene_port_ops xgene_gport_ops
= {
1014 .reset
= xgene_enet_reset
,
1015 .clear
= xgene_enet_clear
,
1016 .cle_bypass
= xgene_enet_cle_bypass
,
1017 .shutdown
= xgene_gport_shutdown
,
1020 struct xgene_ring_ops xgene_ring1_ops
= {
1021 .num_ring_config
= NUM_RING_CONFIG
,
1022 .num_ring_id_shift
= 6,
1023 .setup
= xgene_enet_setup_ring
,
1024 .clear
= xgene_enet_clear_ring
,
1025 .wr_cmd
= xgene_enet_wr_cmd
,
1026 .len
= xgene_enet_ring_len
,