// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

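/* Basic MAC configuration: apply the GMAC_CORE_INIT defaults, force the
 * transmitter and a fixed speed when hw->ps is set (used when the port
 * speed is fixed, e.g. PCS setups), and enable the default MAC interrupts
 * plus the PCS interrupts when a PCS is present.
 */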
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);
}

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

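/* RX/TX queue priority mapping: the per-queue priority fields are packed
 * four queues per register, so queues 4-7 live in the second register with
 * the queue index folded back into the 0-3 range (e.g. queue 5 uses field 1
 * of GMAC_RXQ_CTRL3 / GMAC_TXQ_PRTY_MAP1).
 */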
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		 GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

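/* Steer special packet types (AV control, PTP, DCB control, untagged and
 * multicast/broadcast) to a given RX queue. route_possibilities[] is indexed
 * by the packet type minus one, matching the PACKET_* values.
 */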
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

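/* Map an RX queue onto a DMA channel. Queues 0 and 4 use the dedicated
 * Q0/Q4 field of MTL_RXQ_DMA_MAP0/MAP1, the remaining queues use their own
 * per-queue fields.
 */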
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

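/* Configure the credit-based shaper (IEEE 802.1Qav) for one TX queue:
 * enable the AV algorithm, then program the send slope, the idle slope
 * (which shares its register with the TX queue weight) and the high/low
 * credit limits. The values are written as provided by the caller.
 */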
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

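/* Wake-on-LAN setup: select magic-packet and/or global-unicast wake-up and
 * keep the receiver enabled so wake frames can still be detected while the
 * rest of the MAC is powered down.
 */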
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *     status from PHY should be ok before transmitting
	 *     the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *     after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

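/* Program the RX packet filter: promiscuous, pass-all-multicast, hash-based
 * multicast filtering or perfect unicast filtering, falling back to
 * promiscuous mode when the perfect-filter entries are exhausted. With 64
 * hash bins (mcbitslog2 == 6) the top six bits of the bit-reversed CRC pick
 * one of 64 bits spread over two 32-bit GMAC_HASH_TAB registers.
 */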
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

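/* 802.3x flow control: RFE lets the MAC honour received PAUSE frames, while
 * TFE plus the pause time programs PAUSE generation per TX queue (the pause
 * time is only applied in full duplex).
 */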
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

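/* Snapshot the MTL per-queue and GMAC debug status registers into the
 * extra-statistics counters (exposed through ethtool).
 */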
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

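/* VLAN filtering: program the VLAN hash table and switch GMAC_VLAN_TAG
 * between hash matching, a single perfect match and filtering disabled,
 * optionally matching double (S+C) tagged frames.
 */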
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + GMAC_VLAN_TAG);

		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

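/* L3/L4 (IP address and TCP/UDP port) filters. Each filter number selects
 * one GMAC_L3L4_CTRL instance; enabling any filter also sets IPFE in the
 * packet filter register, and disabling a filter clears its control word.
 */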
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

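/* Callback tables: dwmac4_ops is the base GMAC4 set, dwmac410_ops switches
 * to stmmac_dwmac4_set_mac and adds the EST/FPE hooks, and dwmac510_ops
 * additionally wires up the dwmac5 safety-feature, RX parser and flexible
 * PPS helpers. The stmmac HW interface code is expected to select the table
 * matching the detected core version.
 */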
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
};

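/* One-time setup of mac_device_info for a GMAC4/5 core: filter sizes, the
 * speed/duplex configuration bits used by the link code and the MDIO
 * address/data register layout (PA in bits 25:21, register in bits 20:16,
 * CSR clock range in bits 11:8).
 */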
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);

	return 0;
}