/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

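/* Register accessors: each block of the SGMAC port (ethernet CSR, clock/reset,
 * ring interface, diagnostic and MCX MAC CSRs) is a separate memory-mapped
 * region whose base address is held in the port's pdata.
 */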
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
				     u32 val)
{
	iowrite32(val, p->base_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset,
				  u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->mcx_mac_csr_addr + offset);
}

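/* Release the ENET block memories from RAM shutdown and poll the block
 * memory ready register until all banks report ready.
 */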
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
	struct net_device *ndev = p->ndev;
	u32 data, shutdown;
	int i = 0;

	shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
	data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);

	if (!shutdown && data == ~0U) {
		netdev_dbg(ndev, "+ ecc_init done, skipping\n");
		return 0;
	}

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
		if (data == ~0U)
			return 0;
	} while (++i < 10);

	netdev_err(ndev, "Failed to release memory from shutdown\n");
	return -ENODEV;
}

static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				     u32 *rx, u32 *tx)
{
	u32 addr, count;

	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
		ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
	count = xgene_enet_rd_mcx_csr(pdata, addr);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ECM_CONFIG0_REG_0_ADDR :
		ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
	xgene_enet_rd_mcx_csr(pdata, addr);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
	u32 val;

	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}

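/* The SGMII link is managed through an integrated PHY at INT_PHY_ADDR,
 * reached indirectly through the MAC's MII management registers.
 */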
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
				u32 reg, u16 data)
{
	u32 addr, wr_data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

	wr_data = PHY_CONTROL(data);
	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK))
			return;
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT write failed\n");
}

static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
	u32 addr, data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK)) {
			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);

			return data;
		}
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT read failed\n");

	return 0;
}

static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
	u32 addr0, addr1;
	u8 *dev_addr = p->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
	u32 data;

	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);

	if (LINK_SPEED(data) == PHY_SPEED_1000)
		p->phy_speed = SPEED_1000;
	else if (LINK_SPEED(data) == PHY_SPEED_100)
		p->phy_speed = SPEED_100;
	else
		p->phy_speed = SPEED_10;

	return data & LINK_UP;
}

static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
{
	u32 value;

	if (p->phy_speed == SPEED_UNKNOWN)
		return;

	value = xgene_mii_phy_read(p, INT_PHY_ADDR,
				   SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
	if (!(value & LINK_UP))
		xgene_sgmii_tbi_control_reset(p);
}

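/* Program the MAC and ICM blocks to match the speed resolved by SGMII
 * auto-negotiation (10/100/1000 Mbps).
 */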
static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
{
	u32 icm0_addr, icm2_addr, debug_addr;
	u32 icm0, icm2, intf_ctl;
	u32 mc2, value;

	xgene_sgmii_reset(p);

	if (p->enet_id == XGENE_ENET1) {
		icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
		icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
		debug_addr = DEBUG_REG_ADDR;
	} else {
		icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
		icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
		debug_addr = XG_DEBUG_REG_ADDR;
	}

	icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
	icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
	mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);

	switch (p->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 16);
		value = xgene_enet_rd_csr(p, debug_addr);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(p, debug_addr, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
	xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}

static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmii_configure(p);

	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(1000, 2000);
	}
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");
}

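/* Set or clear control bits (RX/TX enable, flow control) in MAC_CONFIG_1 */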
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
	u32 data;

	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

	if (set)
		data |= bits;
	else
		data &= ~bits;

	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
{
	xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);

	p->mac_ops->enable_tx_pause(p, enable);
}

static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
}

static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 pause_thres_reg, pause_off_thres_reg;
	u32 enet_spare_cfg_reg, rsif_config_reg;
	u32 cfg_bypass_reg, rx_dv_gate_reg;
	u32 data, data1, data2, offset;
	u32 multi_dpf_reg;

	if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
		xgene_sgmac_reset(p);

	xgene_sgmii_enable_autoneg(p);
	xgene_sgmac_set_speed(p);
	xgene_sgmac_set_mac_addr(p);

	if (p->enet_id == XGENE_ENET1) {
		enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = CFG_BYPASS_ADDR;
		offset = p->port_id * OFFSET_4;
		rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
	} else {
		enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
		rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
	}

	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, rsif_config_reg);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, rsif_config_reg, data);

	/* Configure HW pause frame generation */
	multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
			XG_MCX_MULTI_DPF0_ADDR;
	data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
	data = (DEF_QUANTA << 16) | (data & 0xffff);
	xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);

	if (p->enet_id != XGENE_ENET1) {
		data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
		xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
	}

	pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
			  XG_RXBUF_PAUSE_THRESH;
	pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
			      RXBUF_PAUSE_OFF_THRESH : 0;

	if (p->enet_id == XGENE_ENET1) {
		data1 = xgene_enet_rd_csr(p, pause_thres_reg);
		data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);

		if (!(p->port_id % 2)) {
			data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
			data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
		} else {
			data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
			data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
		}

		xgene_enet_wr_csr(p, pause_thres_reg, data1);
		xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
	} else {
		data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
		xgene_enet_wr_csr(p, pause_thres_reg, data);
	}

	xgene_sgmac_flowctl_tx(p, p->tx_pause);
	xgene_sgmac_flowctl_rx(p, p->rx_pause);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}

static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}

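/* Port-level reset: initialize the ring manager, cycle the port clock on
 * DT systems or invoke the ACPI _RST/_INI method otherwise, then bring the
 * block memories out of shutdown for port 0.
 */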
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (!xgene_ring_mgr_init(p))
		return -ENODEV;

	if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
		xgene_enet_config_ring_if_assoc(p);
		return 0;
	}

	if (p->enet_id == XGENE_ENET2)
		xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);

	if (dev->of_node) {
		if (!IS_ERR(p->clk)) {
			clk_prepare_enable(p->clk);
			udelay(5);
			clk_disable_unprepare(p->clk);
			udelay(5);
			clk_prepare_enable(p->clk);
			udelay(5);
		}
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_RST", NULL, NULL);
		else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_INI", NULL, NULL);
#endif
	}

	if (!p->port_id) {
		xgene_enet_ecc_init(p);
		xgene_enet_config_ring_if_assoc(p);
	}

	return 0;
}

static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cle_bypass_reg0, cle_bypass_reg1;
	u32 offset = p->port_id * MAC_OFFSET;
	u32 data, fpsel, nxtfpsel;

	if (p->enet_id == XGENE_ENET1) {
		cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
		cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
	} else {
		cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
		cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
	}

	data = CFG_CLE_BYPASS_EN0;
	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
	       CFG_CLE_NXTFPSEL0(nxtfpsel);
	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(p->clk))
			clk_disable_unprepare(p->clk);
	}
}

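/* Periodic link-state poll: mirror the SGMII link status into the netdev
 * carrier state and reschedule at a rate based on whether the link is up.
 */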
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
				     struct xgene_enet_pdata, link_work);
	struct net_device *ndev = p->ndev;
	u32 link, poll_interval;

	link = xgene_enet_link_status(p);
	if (link) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_sgmac_set_speed(p);
			xgene_sgmac_rx_enable(p);
			xgene_sgmac_tx_enable(p);
			netdev_info(ndev, "Link is Up - %dMbps\n",
				    p->phy_speed);
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_sgmac_rx_disable(p);
			xgene_sgmac_tx_disable(p);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;
	}

	schedule_delayed_work(&p->link_work, poll_interval);
}

static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
{
	u32 data, ecm_cfg_addr;

	if (p->enet_id == XGENE_ENET1) {
		ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
			       CSR_ECM_CFG_1_ADDR;
	} else {
		ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
	}

	data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
	xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
}

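/* MAC and port callbacks exported to the core X-Gene ENET driver for
 * SGMII-based ports
 */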
const struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.get_drop_cnt	= xgene_sgmac_get_drop_cnt,
	.set_speed	= xgene_sgmac_set_speed,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.set_framesize	= xgene_sgmac_set_frame_size,
	.link_state	= xgene_enet_link_state,
	.enable_tx_pause	= xgene_sgmac_enable_tx_pause,
	.flowctl_tx	= xgene_sgmac_flowctl_tx,
	.flowctl_rx	= xgene_sgmac_flowctl_rx
};

const struct xgene_port_ops xgene_sgport_ops = {
	.reset	= xgene_enet_reset,
	.clear	= xgene_enet_clear,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};