// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"

static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	} else {
		tx &= ~XGMAC_CONFIG_TE;
		rx &= ~XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (hw->rx_csum)
		value |= XGMAC_CONFIG_IPC;
	else
		value &= ~XGMAC_CONFIG_IPC;
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= XGMAC_RAA;
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value &= ~XGMAC_TSA;
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

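/*
 * Note on the CBS programming above: the XGMAC_MTL_TCx_QUANTUM_WEIGHT
 * register that dwxgmac2_set_mtl_tx_queue_weight() uses for the ETS
 * quantum/weight is reused here to hold the idle slope once the traffic
 * class is switched to credit-based shaping (XGMAC_CC | XGMAC_CBS).
 */
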
static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}

static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (addr[5] << 8) | addr[4];
	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

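/*
 * Layout reminder (illustrative, derived from the two helpers above): for a
 * MAC address aa:bb:cc:dd:ee:ff, XGMAC_ADDRx_LOW holds 0xddccbbaa and
 * XGMAC_ADDRx_HIGH holds 0x0000ffee, plus the XGMAC_AE (address enable) bit
 * set by dwxgmac2_set_umac_addr().
 */
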
static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);

	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		value |= XGMAC_TXCGE;

	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	if (link)
		value |= XGMAC_PLS;
	else
		value &= ~XGMAC_PLS;
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
}

static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, regs;

	switch (mcbitslog2) {
	case 6:
		numhashregs = 2;
		break;
	case 7:
		numhashregs = 4;
		break;
	case 8:
		numhashregs = 8;
		break;
	default:
		return;
	}

	for (regs = 0; regs < numhashregs; regs++)
		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

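/*
 * Hash filtering sketch (illustrative numbers, not taken from this file):
 * with mcbitslog2 = 6 (64 bins), the bin for a multicast address is the top
 * six bits of bitrev32(~crc32_le(~0, addr, 6)), i.e. nr >> (32 - 6).
 * Bin nr then maps to bit (nr & 0x1F) of hash register (nr >> 5), which is
 * what the mc_filter[] update above computes before dwxgmac2_set_mchash()
 * writes the registers.
 */
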
static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		value |= XGMAC_CONFIG_LM;
	else
		value &= ~XGMAC_CONFIG_LM;

	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}

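/*
 * The RSS key and indirection table are written through an indirect
 * interface: data goes to XGMAC_RSS_DATA, then XGMAC_RSS_ADDR is written
 * with the index, ADDRT (key vs. table) and the OB "operation busy" bit,
 * and the poll above waits (up to ~10ms) for the hardware to clear OB.
 */
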
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}

static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}

struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)

static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" }, /* 31 */
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}

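/*
 * Index convention used by dwxgmac3_safety_feat_dump(): callers walk a flat
 * index where each 32-entry block maps to one dwxgmac3_all_errors[] module
 * (0 = MAC, 1 = MTL, 2 = DMA). As an illustrative example, index 37 refers
 * to bit 5 of the MTL error descriptors and to stats slot 37.
 */
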
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

	val &= ~XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + XGMAC_MTL_OPMODE);
	val |= XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->frag_ptr = frag;
			entry->val.match_en |= XGMAC_RXP_OF;
			entry->val.af = 0;
			entry->val.of = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}

static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}

static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}

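/*
 * PPS period arithmetic above, with illustrative numbers (not taken from
 * this file): for a requested 1s period and sub_second_inc = 4ns,
 * period = 1000000000 / 4 = 250000000 units, so XGMAC_PPSx_INTERVAL is
 * programmed with 249999999 and, after the right shift, XGMAC_PPSx_WIDTH
 * with 124999999, giving roughly a 50% duty cycle.
 */
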
static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);

	value &= ~XGMAC_CONFIG_SARC;
	value |= val << XGMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_VLAN_INCL);
	value |= XGMAC_VLAN_VLTI;
	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~XGMAC_VLAN_VLC;
	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
	writel(value, ioaddr + XGMAC_VLAN_INCL);
}

static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
			       !(value & XGMAC_XB), 100, 10000))
		return -EBUSY;
	return 0;
}

static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}

static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}

static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	if (udp)
		value |= XGMAC_L4PEN0;
	else
		value &= ~XGMAC_L4PEN0;

	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		value |= XGMAC_CONFIG_ARPEN;
	else
		value &= ~XGMAC_CONFIG_ARPEN;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);

	ctrl = (reg << XGMAC_ADDR_SHIFT);
	ctrl |= gcl ? 0 : XGMAC_GCRR;

	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	ctrl |= XGMAC_SRWO;
	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
					 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
}

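/*
 * dwxgmac3_est_write() drives the EST indirect access: XGMAC_GCRR selects
 * the control registers (BTR/CTR/TER/LLR) rather than the gate control
 * list, and setting XGMAC_SRWO starts the write, which the poll above waits
 * on until the hardware clears it (up to 5ms).
 */
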
static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
				  unsigned int ptp_rate)
{
	int i, ret = 0x0;
	u32 ctrl;

	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
	if (ret)
		return ret;

	for (i = 0; i < cfg->gcl_size; i++) {
		ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
		if (ret)
			return ret;
	}

	ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
	ctrl &= ~XGMAC_PTOV;
	ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
	if (cfg->enable)
		ctrl |= XGMAC_EEST | XGMAC_SSWL;
	else
		ctrl &= ~XGMAC_EEST;

	writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
	return 0;
}

static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
				   u32 num_rxq, bool enable)
{
	u32 value;

	if (!enable) {
		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);

		value &= ~XGMAC_EFPE;

		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
		return;
	}

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~XGMAC_RQ;
	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
	writel(value, ioaddr + XGMAC_RXQ_CTRL1);

	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
	value |= XGMAC_EFPE;
	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
}

const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_get_adv_lp = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};

static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				      u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
}

const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_get_adv_lp = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};

int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}

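/*
 * MDIO field layout implied by the mii masks/shifts set above: the port/PHY
 * address sits in bits 20:16, the register number in bits 15:0, and the CSR
 * clock range selection in bits 21:19 of the MDIO address register
 * (XGMAC_MDIO_ADDR).
 */
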
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}