/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	int mtu = dev->mtu;

	value |= GMAC_CORE_INIT;

	if (mtu > 1500)
		value |= GMAC_CONFIG_2K;
	if (mtu > 2000)
		value |= GMAC_CONFIG_JE;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);
}

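/* Assign an RX queue to either the AVB or the DCB traffic class via
 * GMAC_RXQ_CTRL0.
 */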
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
						GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

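/* Route a packet type (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to the given RX queue via GMAC_RXQ_CTRL1; the
 * table below is indexed by the PACKET_* value minus one.
 */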
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

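/* Map an RX queue onto a DMA channel: queues 0-3 are configured through
 * MTL_RXQ_DMA_MAP0 and queues 4-7 through MTL_RXQ_DMA_MAP1; queues 0 and 4
 * use the dedicated Q04MDMACH field, the others the per-queue field.
 */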
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

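/* Program the Credit-Based Shaper parameters (slopes, credits and the ETS/AV
 * algorithm enable bits) for an AVB TX queue.
 */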
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *     status from PHY should be ok before transmitting
	 *     the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *     after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

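/* Program the MAC packet filter: promiscuous, pass-all-multicast, 64-bit
 * multicast hash filtering or perfect unicast filtering, depending on the
 * netdev flags and address lists.
 */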
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	unsigned int value = 0;

	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
		/* Pass all multi */
		value = GMAC_PACKET_FILTER_PM;
		/* Set the 64 bits of the HASH tab. To be updated if taller
		 * hash table is used
		 */
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
	} else if (!netdev_mc_empty(dev)) {
		u32 mc_filter[2];
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value = GMAC_PACKET_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the content of the Hash Table Reg 0 and 1.
			 */
			int bit_nr =
				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
			/* The most significant bit determines the register
			 * to use while the other 5 bits determines the bit
			 * within the selected register
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
	}

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
				GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

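/* Check the per-queue MTL interrupt status for the given channel and clear a
 * pending RX overflow condition.
 */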
static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

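/* Fill the extra statistics from the per-queue MTL TX/RX debug registers and
 * from the GMAC_DEBUG register.
 */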
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
};

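/* Fill the mac_device_info with the DWMAC4/5 core, link and MDIO parameters */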
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);

	return 0;
}