1 // SPDX-License-Identifier: GPL-2.0
2 /* Ethernet device driver for Cortina Systems Gemini SoC
3 * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
4 * Net Engine and Gigabit Ethernet MAC (GMAC)
5 * This hardware contains a TCP Offload Engine (TOE) but currently the
6 * driver does not make use of it.
9 * Linus Walleij <linus.walleij@linaro.org>
10 * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
11 * Michał Mirosław <mirq-linux@rere.qmqm.pl>
12 * Paulius Zaleckas <paulius.zaleckas@gmail.com>
13 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
14 * Gary Chen & Ch Hsu Storlink Semiconductor
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/spinlock.h>
21 #include <linux/slab.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/cache.h>
24 #include <linux/interrupt.h>
25 #include <linux/reset.h>
26 #include <linux/clk.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/of_platform.h>
31 #include <linux/etherdevice.h>
32 #include <linux/if_vlan.h>
33 #include <linux/skbuff.h>
34 #include <linux/phy.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/tcp.h>
38 #include <linux/u64_stats_sync.h>
42 #include <linux/ipv6.h>
46 #define DRV_NAME "gmac-gemini"
47 #define DRV_VERSION "1.0"
49 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
50 static int debug
= -1;
51 module_param(debug
, int, 0);
52 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
58 #define HBURST_SINGLE 0x00
59 #define HBURST_INCR 0x01
60 #define HBURST_INCR4 0x02
61 #define HBURST_INCR8 0x03
63 #define HPROT_DATA_CACHE BIT(0)
64 #define HPROT_PRIVILIGED BIT(1)
65 #define HPROT_BUFFERABLE BIT(2)
66 #define HPROT_CACHABLE BIT(3)
68 #define DEFAULT_RX_COALESCE_NSECS 0
69 #define DEFAULT_GMAC_RXQ_ORDER 9
70 #define DEFAULT_GMAC_TXQ_ORDER 8
71 #define DEFAULT_RX_BUF_ORDER 11
72 #define DEFAULT_NAPI_WEIGHT 64
73 #define TX_MAX_FRAGS 16
74 #define TX_QUEUE_NUM 1 /* max: 6 */
75 #define RX_MAX_ALLOC_ORDER 2
77 #define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
78 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
79 #define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
80 GMAC0_SWTQ00_FIN_INT_BIT)
81 #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
83 #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
84 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
85 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
88 * struct gmac_queue_page - page buffer per-page info
90 struct gmac_queue_page
{
96 struct gmac_txdesc
*ring
;
99 unsigned int noirq_packets
;
102 struct gemini_ethernet
;
104 struct gemini_ethernet_port
{
107 struct gemini_ethernet
*geth
;
108 struct net_device
*netdev
;
110 void __iomem
*dma_base
;
111 void __iomem
*gmac_base
;
113 struct reset_control
*reset
;
117 void __iomem
*rxq_rwptr
;
118 struct gmac_rxdesc
*rxq_ring
;
119 unsigned int rxq_order
;
121 struct napi_struct napi
;
122 struct hrtimer rx_coalesce_timer
;
123 unsigned int rx_coalesce_nsecs
;
124 unsigned int freeq_refill
;
125 struct gmac_txq txq
[TX_QUEUE_NUM
];
126 unsigned int txq_order
;
127 unsigned int irq_every_tx_packets
;
129 dma_addr_t rxq_dma_base
;
130 dma_addr_t txq_dma_base
;
132 unsigned int msg_enable
;
133 spinlock_t config_lock
; /* Locks config register */
135 struct u64_stats_sync tx_stats_syncp
;
136 struct u64_stats_sync rx_stats_syncp
;
137 struct u64_stats_sync ir_stats_syncp
;
139 struct rtnl_link_stats64 stats
;
140 u64 hw_stats
[RX_STATS_NUM
];
141 u64 rx_stats
[RX_STATUS_NUM
];
142 u64 rx_csum_stats
[RX_CHKSUM_NUM
];
144 u64 tx_frag_stats
[TX_MAX_FRAGS
];
145 u64 tx_frags_linearized
;
149 struct gemini_ethernet
{
152 struct gemini_ethernet_port
*port0
;
153 struct gemini_ethernet_port
*port1
;
156 spinlock_t irq_lock
; /* Locks IRQ-related registers */
157 unsigned int freeq_order
;
158 unsigned int freeq_frag_order
;
159 struct gmac_rxdesc
*freeq_ring
;
160 dma_addr_t freeq_dma_base
;
161 struct gmac_queue_page
*freeq_pages
;
162 unsigned int num_freeq_pages
;
163 spinlock_t freeq_lock
; /* Locks queue from reentrance */
166 #define GMAC_STATS_NUM ( \
167 RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
170 static const char gmac_stats_strings
[GMAC_STATS_NUM
][ETH_GSTRING_LEN
] = {
177 "RX_STATUS_GOOD_FRAME",
178 "RX_STATUS_TOO_LONG_GOOD_CRC",
179 "RX_STATUS_RUNT_FRAME",
180 "RX_STATUS_SFD_NOT_FOUND",
181 "RX_STATUS_CRC_ERROR",
182 "RX_STATUS_TOO_LONG_BAD_CRC",
183 "RX_STATUS_ALIGNMENT_ERROR",
184 "RX_STATUS_TOO_LONG_BAD_ALIGN",
186 "RX_STATUS_DA_FILTERED",
187 "RX_STATUS_BUFFER_FULL",
193 "RX_CHKSUM_IP_UDP_TCP_OK",
194 "RX_CHKSUM_IP_OK_ONLY",
197 "RX_CHKSUM_IP_ERR_UNKNOWN",
199 "RX_CHKSUM_TCP_UDP_ERR",
218 "TX_FRAGS_LINEARIZED",
222 static void gmac_dump_dma_state(struct net_device
*netdev
);
224 static void gmac_update_config0_reg(struct net_device
*netdev
,
227 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
231 spin_lock_irqsave(&port
->config_lock
, flags
);
233 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
234 reg
= (reg
& ~vmask
) | val
;
235 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
237 spin_unlock_irqrestore(&port
->config_lock
, flags
);
240 static void gmac_enable_tx_rx(struct net_device
*netdev
)
242 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
246 spin_lock_irqsave(&port
->config_lock
, flags
);
248 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
249 reg
&= ~CONFIG0_TX_RX_DISABLE
;
250 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
252 spin_unlock_irqrestore(&port
->config_lock
, flags
);
255 static void gmac_disable_tx_rx(struct net_device
*netdev
)
257 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
261 spin_lock_irqsave(&port
->config_lock
, flags
);
263 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
264 val
|= CONFIG0_TX_RX_DISABLE
;
265 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
267 spin_unlock_irqrestore(&port
->config_lock
, flags
);
269 mdelay(10); /* let GMAC consume packet */
272 static void gmac_set_flow_control(struct net_device
*netdev
, bool tx
, bool rx
)
274 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
278 spin_lock_irqsave(&port
->config_lock
, flags
);
280 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
281 val
&= ~CONFIG0_FLOW_CTL
;
283 val
|= CONFIG0_FLOW_TX
;
285 val
|= CONFIG0_FLOW_RX
;
286 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
288 spin_unlock_irqrestore(&port
->config_lock
, flags
);
291 static void gmac_speed_set(struct net_device
*netdev
)
293 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
294 struct phy_device
*phydev
= netdev
->phydev
;
295 union gmac_status status
, old_status
;
299 status
.bits32
= readl(port
->gmac_base
+ GMAC_STATUS
);
300 old_status
.bits32
= status
.bits32
;
301 status
.bits
.link
= phydev
->link
;
302 status
.bits
.duplex
= phydev
->duplex
;
304 switch (phydev
->speed
) {
306 status
.bits
.speed
= GMAC_SPEED_1000
;
307 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
308 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_1000
;
309 netdev_dbg(netdev
, "connect %s to RGMII @ 1Gbit\n",
310 phydev_name(phydev
));
313 status
.bits
.speed
= GMAC_SPEED_100
;
314 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
315 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
316 netdev_dbg(netdev
, "connect %s to RGMII @ 100 Mbit\n",
317 phydev_name(phydev
));
320 status
.bits
.speed
= GMAC_SPEED_10
;
321 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
322 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
323 netdev_dbg(netdev
, "connect %s to RGMII @ 10 Mbit\n",
324 phydev_name(phydev
));
327 netdev_warn(netdev
, "Unsupported PHY speed (%d) on %s\n",
328 phydev
->speed
, phydev_name(phydev
));
331 if (phydev
->duplex
== DUPLEX_FULL
) {
332 u16 lcladv
= phy_read(phydev
, MII_ADVERTISE
);
333 u16 rmtadv
= phy_read(phydev
, MII_LPA
);
334 u8 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
336 if (cap
& FLOW_CTRL_RX
)
338 if (cap
& FLOW_CTRL_TX
)
342 gmac_set_flow_control(netdev
, pause_tx
, pause_rx
);
344 if (old_status
.bits32
== status
.bits32
)
347 if (netif_msg_link(port
)) {
348 phy_print_status(phydev
);
349 netdev_info(netdev
, "link flow control: %s\n",
351 ? (phydev
->asym_pause
? "tx" : "both")
352 : (phydev
->asym_pause
? "rx" : "none")
356 gmac_disable_tx_rx(netdev
);
357 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
358 gmac_enable_tx_rx(netdev
);
361 static int gmac_setup_phy(struct net_device
*netdev
)
363 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
364 union gmac_status status
= { .bits32
= 0 };
365 struct device
*dev
= port
->dev
;
366 struct phy_device
*phy
;
368 phy
= of_phy_get_and_connect(netdev
,
373 netdev
->phydev
= phy
;
375 phy_set_max_speed(phy
, SPEED_1000
);
376 phy_support_asym_pause(phy
);
378 /* set PHY interface type */
379 switch (phy
->interface
) {
380 case PHY_INTERFACE_MODE_MII
:
382 "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
383 status
.bits
.mii_rmii
= GMAC_PHY_MII
;
385 case PHY_INTERFACE_MODE_GMII
:
387 "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
388 status
.bits
.mii_rmii
= GMAC_PHY_GMII
;
390 case PHY_INTERFACE_MODE_RGMII
:
392 "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
393 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
396 netdev_err(netdev
, "Unsupported MII interface\n");
398 netdev
->phydev
= NULL
;
401 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
403 if (netif_msg_link(port
))
404 phy_attached_info(phy
);
409 /* The maximum frame length is not logically enumerated in the
410 * hardware, so we do a table lookup to find the applicable max
413 struct gmac_max_framelen
{
414 unsigned int max_l3_len
;
418 static const struct gmac_max_framelen gmac_maxlens
[] = {
421 .val
= CONFIG0_MAXLEN_1518
,
425 .val
= CONFIG0_MAXLEN_1522
,
429 .val
= CONFIG0_MAXLEN_1536
,
433 .val
= CONFIG0_MAXLEN_1542
,
437 .val
= CONFIG0_MAXLEN_9k
,
441 .val
= CONFIG0_MAXLEN_10k
,
445 static int gmac_pick_rx_max_len(unsigned int max_l3_len
)
447 const struct gmac_max_framelen
*maxlen
;
451 maxtot
= max_l3_len
+ ETH_HLEN
+ VLAN_HLEN
;
453 for (i
= 0; i
< ARRAY_SIZE(gmac_maxlens
); i
++) {
454 maxlen
= &gmac_maxlens
[i
];
455 if (maxtot
<= maxlen
->max_l3_len
)
462 static int gmac_init(struct net_device
*netdev
)
464 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
465 union gmac_config0 config0
= { .bits
= {
476 .port0_chk_classq
= 1,
477 .port1_chk_classq
= 1,
479 union gmac_ahb_weight ahb_weight
= { .bits
= {
484 .tq_dv_threshold
= 0,
486 union gmac_tx_wcr0 hw_weigh
= { .bits
= {
492 union gmac_tx_wcr1 sw_weigh
= { .bits
= {
500 union gmac_config1 config1
= { .bits
= {
504 union gmac_config2 config2
= { .bits
= {
508 union gmac_config3 config3
= { .bits
= {
512 union gmac_config0 tmp
;
515 config0
.bits
.max_len
= gmac_pick_rx_max_len(netdev
->mtu
);
516 tmp
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
517 config0
.bits
.reserved
= tmp
.bits
.reserved
;
518 writel(config0
.bits32
, port
->gmac_base
+ GMAC_CONFIG0
);
519 writel(config1
.bits32
, port
->gmac_base
+ GMAC_CONFIG1
);
520 writel(config2
.bits32
, port
->gmac_base
+ GMAC_CONFIG2
);
521 writel(config3
.bits32
, port
->gmac_base
+ GMAC_CONFIG3
);
523 val
= readl(port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
524 writel(ahb_weight
.bits32
, port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
526 writel(hw_weigh
.bits32
,
527 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_0_REG
);
528 writel(sw_weigh
.bits32
,
529 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_1_REG
);
531 port
->rxq_order
= DEFAULT_GMAC_RXQ_ORDER
;
532 port
->txq_order
= DEFAULT_GMAC_TXQ_ORDER
;
533 port
->rx_coalesce_nsecs
= DEFAULT_RX_COALESCE_NSECS
;
535 /* Mark every quarter of the queue a packet for interrupt
536 * in order to be able to wake up the queue if it was stopped
538 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
543 static void gmac_uninit(struct net_device
*netdev
)
546 phy_disconnect(netdev
->phydev
);
549 static int gmac_setup_txqs(struct net_device
*netdev
)
551 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
552 unsigned int n_txq
= netdev
->num_tx_queues
;
553 struct gemini_ethernet
*geth
= port
->geth
;
554 size_t entries
= 1 << port
->txq_order
;
555 struct gmac_txq
*txq
= port
->txq
;
556 struct gmac_txdesc
*desc_ring
;
557 size_t len
= n_txq
* entries
;
558 struct sk_buff
**skb_tab
;
559 void __iomem
*rwptr_reg
;
563 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
565 skb_tab
= kcalloc(len
, sizeof(*skb_tab
), GFP_KERNEL
);
569 desc_ring
= dma_alloc_coherent(geth
->dev
, len
* sizeof(*desc_ring
),
570 &port
->txq_dma_base
, GFP_KERNEL
);
577 if (port
->txq_dma_base
& ~DMA_Q_BASE_MASK
) {
578 dev_warn(geth
->dev
, "TX queue base is not aligned\n");
579 dma_free_coherent(geth
->dev
, len
* sizeof(*desc_ring
),
580 desc_ring
, port
->txq_dma_base
);
585 writel(port
->txq_dma_base
| port
->txq_order
,
586 port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
588 for (i
= 0; i
< n_txq
; i
++) {
589 txq
->ring
= desc_ring
;
591 txq
->noirq_packets
= 0;
593 r
= readw(rwptr_reg
);
595 writew(r
, rwptr_reg
);
600 desc_ring
+= entries
;
607 static void gmac_clean_txq(struct net_device
*netdev
, struct gmac_txq
*txq
,
610 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
611 unsigned int m
= (1 << port
->txq_order
) - 1;
612 struct gemini_ethernet
*geth
= port
->geth
;
613 unsigned int c
= txq
->cptr
;
614 union gmac_txdesc_0 word0
;
615 union gmac_txdesc_1 word1
;
616 unsigned int hwchksum
= 0;
617 unsigned long bytes
= 0;
618 struct gmac_txdesc
*txd
;
619 unsigned short nfrags
;
620 unsigned int errs
= 0;
621 unsigned int pkts
= 0;
632 mapping
= txd
->word2
.buf_adr
;
633 word3
= txd
->word3
.bits32
;
635 dma_unmap_single(geth
->dev
, mapping
,
636 word0
.bits
.buffer_size
, DMA_TO_DEVICE
);
639 dev_kfree_skb(txq
->skb
[c
]);
644 if (!(word3
& SOF_BIT
))
647 if (!word0
.bits
.status_tx_ok
) {
653 bytes
+= txd
->word1
.bits
.byte_count
;
655 if (word1
.bits32
& TSS_CHECKUM_ENABLE
)
658 nfrags
= word0
.bits
.desc_count
- 1;
660 if (nfrags
>= TX_MAX_FRAGS
)
661 nfrags
= TX_MAX_FRAGS
- 1;
663 u64_stats_update_begin(&port
->tx_stats_syncp
);
664 port
->tx_frag_stats
[nfrags
]++;
665 u64_stats_update_end(&port
->tx_stats_syncp
);
669 u64_stats_update_begin(&port
->ir_stats_syncp
);
670 port
->stats
.tx_errors
+= errs
;
671 port
->stats
.tx_packets
+= pkts
;
672 port
->stats
.tx_bytes
+= bytes
;
673 port
->tx_hw_csummed
+= hwchksum
;
674 u64_stats_update_end(&port
->ir_stats_syncp
);
679 static void gmac_cleanup_txqs(struct net_device
*netdev
)
681 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
682 unsigned int n_txq
= netdev
->num_tx_queues
;
683 struct gemini_ethernet
*geth
= port
->geth
;
684 void __iomem
*rwptr_reg
;
687 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
689 for (i
= 0; i
< n_txq
; i
++) {
690 r
= readw(rwptr_reg
);
692 writew(r
, rwptr_reg
);
695 gmac_clean_txq(netdev
, port
->txq
+ i
, r
);
697 writel(0, port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
699 kfree(port
->txq
->skb
);
700 dma_free_coherent(geth
->dev
,
701 n_txq
* sizeof(*port
->txq
->ring
) << port
->txq_order
,
702 port
->txq
->ring
, port
->txq_dma_base
);
705 static int gmac_setup_rxq(struct net_device
*netdev
)
707 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
708 struct gemini_ethernet
*geth
= port
->geth
;
709 struct nontoe_qhdr __iomem
*qhdr
;
711 qhdr
= geth
->base
+ TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
712 port
->rxq_rwptr
= &qhdr
->word1
;
714 /* Remap a slew of memory to use for the RX queue */
715 port
->rxq_ring
= dma_alloc_coherent(geth
->dev
,
716 sizeof(*port
->rxq_ring
) << port
->rxq_order
,
717 &port
->rxq_dma_base
, GFP_KERNEL
);
720 if (port
->rxq_dma_base
& ~NONTOE_QHDR0_BASE_MASK
) {
721 dev_warn(geth
->dev
, "RX queue base is not aligned\n");
725 writel(port
->rxq_dma_base
| port
->rxq_order
, &qhdr
->word0
);
726 writel(0, port
->rxq_rwptr
);
730 static struct gmac_queue_page
*
731 gmac_get_queue_page(struct gemini_ethernet
*geth
,
732 struct gemini_ethernet_port
*port
,
735 struct gmac_queue_page
*gpage
;
739 /* Only look for even pages */
740 mapping
= addr
& PAGE_MASK
;
742 if (!geth
->freeq_pages
) {
743 dev_err(geth
->dev
, "try to get page with no page list\n");
747 /* Look up a ring buffer page from virtual mapping */
748 for (i
= 0; i
< geth
->num_freeq_pages
; i
++) {
749 gpage
= &geth
->freeq_pages
[i
];
750 if (gpage
->mapping
== mapping
)
757 static void gmac_cleanup_rxq(struct net_device
*netdev
)
759 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
760 struct gemini_ethernet
*geth
= port
->geth
;
761 struct gmac_rxdesc
*rxd
= port
->rxq_ring
;
762 static struct gmac_queue_page
*gpage
;
763 struct nontoe_qhdr __iomem
*qhdr
;
764 void __iomem
*dma_reg
;
765 void __iomem
*ptr_reg
;
771 TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
772 dma_reg
= &qhdr
->word0
;
773 ptr_reg
= &qhdr
->word1
;
775 rw
.bits32
= readl(ptr_reg
);
778 writew(r
, ptr_reg
+ 2);
782 /* Loop from read pointer to write pointer of the RX queue
783 * and free up all pages by the queue.
786 mapping
= rxd
[r
].word2
.buf_adr
;
788 r
&= ((1 << port
->rxq_order
) - 1);
793 /* Freeq pointers are one page off */
794 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
796 dev_err(geth
->dev
, "could not find page\n");
799 /* Release the RX queue reference to the page */
800 put_page(gpage
->page
);
803 dma_free_coherent(geth
->dev
, sizeof(*port
->rxq_ring
) << port
->rxq_order
,
804 port
->rxq_ring
, port
->rxq_dma_base
);
807 static struct page
*geth_freeq_alloc_map_page(struct gemini_ethernet
*geth
,
810 struct gmac_rxdesc
*freeq_entry
;
811 struct gmac_queue_page
*gpage
;
812 unsigned int fpp_order
;
813 unsigned int frag_len
;
818 /* First allocate and DMA map a single page */
819 page
= alloc_page(GFP_ATOMIC
);
823 mapping
= dma_map_single(geth
->dev
, page_address(page
),
824 PAGE_SIZE
, DMA_FROM_DEVICE
);
825 if (dma_mapping_error(geth
->dev
, mapping
)) {
830 /* The assign the page mapping (physical address) to the buffer address
831 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
832 * 4k), and the default RX frag order is 11 (fragments are up 20 2048
833 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
834 * each page normally needs two entries in the queue.
836 frag_len
= 1 << geth
->freeq_frag_order
; /* Usually 2048 */
837 fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
838 freeq_entry
= geth
->freeq_ring
+ (pn
<< fpp_order
);
839 dev_dbg(geth
->dev
, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
840 pn
, frag_len
, (1 << fpp_order
), freeq_entry
);
841 for (i
= (1 << fpp_order
); i
> 0; i
--) {
842 freeq_entry
->word2
.buf_adr
= mapping
;
847 /* If the freeq entry already has a page mapped, then unmap it. */
848 gpage
= &geth
->freeq_pages
[pn
];
850 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
851 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
852 /* This should be the last reference to the page so it gets
855 put_page(gpage
->page
);
858 /* Then put our new mapping into the page table */
859 dev_dbg(geth
->dev
, "page %d, DMA addr: %08x, page %p\n",
860 pn
, (unsigned int)mapping
, page
);
861 gpage
->mapping
= mapping
;
868 * geth_fill_freeq() - Fill the freeq with empty fragments to use
869 * @geth: the ethernet adapter
870 * @refill: whether to reset the queue by filling in all freeq entries or
871 * just refill it, usually the interrupt to refill the queue happens when
872 * the queue is half empty.
874 static unsigned int geth_fill_freeq(struct gemini_ethernet
*geth
, bool refill
)
876 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
877 unsigned int count
= 0;
878 unsigned int pn
, epn
;
884 m_pn
= (1 << (geth
->freeq_order
- fpp_order
)) - 1;
886 spin_lock_irqsave(&geth
->freeq_lock
, flags
);
888 rw
.bits32
= readl(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
889 pn
= (refill
? rw
.bits
.wptr
: rw
.bits
.rptr
) >> fpp_order
;
890 epn
= (rw
.bits
.rptr
>> fpp_order
) - 1;
893 /* Loop over the freeq ring buffer entries */
895 struct gmac_queue_page
*gpage
;
898 gpage
= &geth
->freeq_pages
[pn
];
901 dev_dbg(geth
->dev
, "fill entry %d page ref count %d add %d refs\n",
902 pn
, page_ref_count(page
), 1 << fpp_order
);
904 if (page_ref_count(page
) > 1) {
905 unsigned int fl
= (pn
- epn
) & m_pn
;
907 if (fl
> 64 >> fpp_order
)
910 page
= geth_freeq_alloc_map_page(geth
, pn
);
915 /* Add one reference per fragment in the page */
916 page_ref_add(page
, 1 << fpp_order
);
917 count
+= 1 << fpp_order
;
922 writew(pn
<< fpp_order
, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
924 spin_unlock_irqrestore(&geth
->freeq_lock
, flags
);
929 static int geth_setup_freeq(struct gemini_ethernet
*geth
)
931 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
932 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
933 unsigned int len
= 1 << geth
->freeq_order
;
934 unsigned int pages
= len
>> fpp_order
;
935 union queue_threshold qt
;
936 union dma_skb_size skbsz
;
940 geth
->freeq_ring
= dma_alloc_coherent(geth
->dev
,
941 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
942 &geth
->freeq_dma_base
, GFP_KERNEL
);
943 if (!geth
->freeq_ring
)
945 if (geth
->freeq_dma_base
& ~DMA_Q_BASE_MASK
) {
946 dev_warn(geth
->dev
, "queue ring base is not aligned\n");
950 /* Allocate a mapping to page look-up index */
951 geth
->freeq_pages
= kcalloc(pages
, sizeof(*geth
->freeq_pages
),
953 if (!geth
->freeq_pages
)
955 geth
->num_freeq_pages
= pages
;
957 dev_info(geth
->dev
, "allocate %d pages for queue\n", pages
);
958 for (pn
= 0; pn
< pages
; pn
++)
959 if (!geth_freeq_alloc_map_page(geth
, pn
))
960 goto err_freeq_alloc
;
962 filled
= geth_fill_freeq(geth
, false);
964 goto err_freeq_alloc
;
966 qt
.bits32
= readl(geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
967 qt
.bits
.swfq_empty
= 32;
968 writel(qt
.bits32
, geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
970 skbsz
.bits
.sw_skb_size
= 1 << geth
->freeq_frag_order
;
971 writel(skbsz
.bits32
, geth
->base
+ GLOBAL_DMA_SKB_SIZE_REG
);
972 writel(geth
->freeq_dma_base
| geth
->freeq_order
,
973 geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
979 struct gmac_queue_page
*gpage
;
983 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
984 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
985 gpage
= &geth
->freeq_pages
[pn
];
986 put_page(gpage
->page
);
989 kfree(geth
->freeq_pages
);
991 dma_free_coherent(geth
->dev
,
992 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
993 geth
->freeq_ring
, geth
->freeq_dma_base
);
994 geth
->freeq_ring
= NULL
;
999 * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
1000 * @geth: the Gemini global ethernet state
1002 static void geth_cleanup_freeq(struct gemini_ethernet
*geth
)
1004 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
1005 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
1006 unsigned int len
= 1 << geth
->freeq_order
;
1007 unsigned int pages
= len
>> fpp_order
;
1010 writew(readw(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
),
1011 geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
1012 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
1014 for (pn
= 0; pn
< pages
; pn
++) {
1015 struct gmac_queue_page
*gpage
;
1018 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
1019 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
1021 gpage
= &geth
->freeq_pages
[pn
];
1022 while (page_ref_count(gpage
->page
) > 0)
1023 put_page(gpage
->page
);
1026 kfree(geth
->freeq_pages
);
1028 dma_free_coherent(geth
->dev
,
1029 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
1030 geth
->freeq_ring
, geth
->freeq_dma_base
);
1034 * geth_resize_freeq() - resize the software queue depth
1035 * @port: the port requesting the change
1037 * This gets called at least once during probe() so the device queue gets
1038 * "resized" from the hardware defaults. Since both ports/net devices share
1039 * the same hardware queue, some synchronization between the ports is
1042 static int geth_resize_freeq(struct gemini_ethernet_port
*port
)
1044 struct gemini_ethernet
*geth
= port
->geth
;
1045 struct net_device
*netdev
= port
->netdev
;
1046 struct gemini_ethernet_port
*other_port
;
1047 struct net_device
*other_netdev
;
1048 unsigned int new_size
= 0;
1049 unsigned int new_order
;
1050 unsigned long flags
;
1054 if (netdev
->dev_id
== 0)
1055 other_netdev
= geth
->port1
->netdev
;
1057 other_netdev
= geth
->port0
->netdev
;
1059 if (other_netdev
&& netif_running(other_netdev
))
1062 new_size
= 1 << (port
->rxq_order
+ 1);
1063 netdev_dbg(netdev
, "port %d size: %d order %d\n",
1068 other_port
= netdev_priv(other_netdev
);
1069 new_size
+= 1 << (other_port
->rxq_order
+ 1);
1070 netdev_dbg(other_netdev
, "port %d size: %d order %d\n",
1071 other_netdev
->dev_id
,
1072 (1 << (other_port
->rxq_order
+ 1)),
1073 other_port
->rxq_order
);
1076 new_order
= min(15, ilog2(new_size
- 1) + 1);
1077 dev_dbg(geth
->dev
, "set shared queue to size %d order %d\n",
1078 new_size
, new_order
);
1079 if (geth
->freeq_order
== new_order
)
1082 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1084 /* Disable the software queue IRQs */
1085 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1086 en
&= ~SWFQ_EMPTY_INT_BIT
;
1087 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1088 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1090 /* Drop the old queue */
1091 if (geth
->freeq_ring
)
1092 geth_cleanup_freeq(geth
);
1094 /* Allocate a new queue with the desired order */
1095 geth
->freeq_order
= new_order
;
1096 ret
= geth_setup_freeq(geth
);
1098 /* Restart the interrupts - NOTE if this is the first resize
1099 * after probe(), this is where the interrupts get turned on
1100 * in the first place.
1102 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1103 en
|= SWFQ_EMPTY_INT_BIT
;
1104 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1105 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1110 static void gmac_tx_irq_enable(struct net_device
*netdev
,
1111 unsigned int txq
, int en
)
1113 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1114 struct gemini_ethernet
*geth
= port
->geth
;
1117 netdev_dbg(netdev
, "%s device %d\n", __func__
, netdev
->dev_id
);
1119 mask
= GMAC0_IRQ0_TXQ0_INTS
<< (6 * netdev
->dev_id
+ txq
);
1122 writel(mask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1124 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1125 val
= en
? val
| mask
: val
& ~mask
;
1126 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
/* TX completion interrupt: mask further TX IRQs for this queue and wake
 * the stack's queue so transmission resumes.
 */
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
{
	struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);

	gmac_tx_irq_enable(netdev, txq_num, 0);
	netif_tx_wake_queue(ntxq);
}
1137 static int gmac_map_tx_bufs(struct net_device
*netdev
, struct sk_buff
*skb
,
1138 struct gmac_txq
*txq
, unsigned short *desc
)
1140 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1141 struct skb_shared_info
*skb_si
= skb_shinfo(skb
);
1142 unsigned short m
= (1 << port
->txq_order
) - 1;
1143 short frag
, last_frag
= skb_si
->nr_frags
- 1;
1144 struct gemini_ethernet
*geth
= port
->geth
;
1145 unsigned int word1
, word3
, buflen
;
1146 unsigned short w
= *desc
;
1147 struct gmac_txdesc
*txd
;
1148 skb_frag_t
*skb_frag
;
1155 if (skb
->protocol
== htons(ETH_P_8021Q
))
1162 word1
|= TSS_MTU_ENABLE_BIT
;
1166 if (skb
->ip_summed
!= CHECKSUM_NONE
) {
1169 if (skb
->protocol
== htons(ETH_P_IP
)) {
1170 word1
|= TSS_IP_CHKSUM_BIT
;
1171 tcp
= ip_hdr(skb
)->protocol
== IPPROTO_TCP
;
1173 word1
|= TSS_IPV6_ENABLE_BIT
;
1174 tcp
= ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
;
1177 word1
|= tcp
? TSS_TCP_CHKSUM_BIT
: TSS_UDP_CHKSUM_BIT
;
1181 while (frag
<= last_frag
) {
1184 buflen
= skb_headlen(skb
);
1186 skb_frag
= skb_si
->frags
+ frag
;
1187 buffer
= skb_frag_address(skb_frag
);
1188 buflen
= skb_frag_size(skb_frag
);
1191 if (frag
== last_frag
) {
1196 mapping
= dma_map_single(geth
->dev
, buffer
, buflen
,
1198 if (dma_mapping_error(geth
->dev
, mapping
))
1201 txd
= txq
->ring
+ w
;
1202 txd
->word0
.bits32
= buflen
;
1203 txd
->word1
.bits32
= word1
;
1204 txd
->word2
.buf_adr
= mapping
;
1205 txd
->word3
.bits32
= word3
;
1207 word3
&= MTU_SIZE_BIT_MASK
;
1217 while (w
!= *desc
) {
1221 dma_unmap_page(geth
->dev
, txq
->ring
[w
].word2
.buf_adr
,
1222 txq
->ring
[w
].word0
.bits
.buffer_size
,
1228 static int gmac_start_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
1230 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1231 unsigned short m
= (1 << port
->txq_order
) - 1;
1232 struct netdev_queue
*ntxq
;
1233 unsigned short r
, w
, d
;
1234 void __iomem
*ptr_reg
;
1235 struct gmac_txq
*txq
;
1236 int txq_num
, nfrags
;
1239 if (skb
->len
>= 0x10000)
1242 txq_num
= skb_get_queue_mapping(skb
);
1243 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE_PTR_REG(txq_num
);
1244 txq
= &port
->txq
[txq_num
];
1245 ntxq
= netdev_get_tx_queue(netdev
, txq_num
);
1246 nfrags
= skb_shinfo(skb
)->nr_frags
;
1248 rw
.bits32
= readl(ptr_reg
);
1252 d
= txq
->cptr
- w
- 1;
1255 if (d
< nfrags
+ 2) {
1256 gmac_clean_txq(netdev
, txq
, r
);
1257 d
= txq
->cptr
- w
- 1;
1260 if (d
< nfrags
+ 2) {
1261 netif_tx_stop_queue(ntxq
);
1263 d
= txq
->cptr
+ nfrags
+ 16;
1265 txq
->ring
[d
].word3
.bits
.eofie
= 1;
1266 gmac_tx_irq_enable(netdev
, txq_num
, 1);
1268 u64_stats_update_begin(&port
->tx_stats_syncp
);
1269 netdev
->stats
.tx_fifo_errors
++;
1270 u64_stats_update_end(&port
->tx_stats_syncp
);
1271 return NETDEV_TX_BUSY
;
1275 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
)) {
1276 if (skb_linearize(skb
))
1279 u64_stats_update_begin(&port
->tx_stats_syncp
);
1280 port
->tx_frags_linearized
++;
1281 u64_stats_update_end(&port
->tx_stats_syncp
);
1283 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
))
1287 writew(w
, ptr_reg
+ 2);
1289 gmac_clean_txq(netdev
, txq
, r
);
1290 return NETDEV_TX_OK
;
1295 u64_stats_update_begin(&port
->tx_stats_syncp
);
1296 port
->stats
.tx_dropped
++;
1297 u64_stats_update_end(&port
->tx_stats_syncp
);
1298 return NETDEV_TX_OK
;
/* ndo_tx_timeout hook: log the stall and dump DMA state for debugging. */
static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	netdev_err(netdev, "Tx timeout\n");
	gmac_dump_dma_state(netdev);
}
1307 static void gmac_enable_irq(struct net_device
*netdev
, int enable
)
1309 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1310 struct gemini_ethernet
*geth
= port
->geth
;
1311 unsigned long flags
;
1314 netdev_dbg(netdev
, "%s device %d %s\n", __func__
,
1315 netdev
->dev_id
, enable
? "enable" : "disable");
1316 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1318 mask
= GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2);
1319 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1320 val
= enable
? (val
| mask
) : (val
& ~mask
);
1321 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1323 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1324 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1325 val
= enable
? (val
| mask
) : (val
& ~mask
);
1326 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1328 mask
= GMAC0_IRQ4_8
<< (netdev
->dev_id
* 8);
1329 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1330 val
= enable
? (val
| mask
) : (val
& ~mask
);
1331 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1333 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1336 static void gmac_enable_rx_irq(struct net_device
*netdev
, int enable
)
1338 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1339 struct gemini_ethernet
*geth
= port
->geth
;
1340 unsigned long flags
;
1343 netdev_dbg(netdev
, "%s device %d %s\n", __func__
, netdev
->dev_id
,
1344 enable
? "enable" : "disable");
1345 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1346 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1348 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1349 val
= enable
? (val
| mask
) : (val
& ~mask
);
1350 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1352 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1355 static struct sk_buff
*gmac_skb_if_good_frame(struct gemini_ethernet_port
*port
,
1356 union gmac_rxdesc_0 word0
,
1357 unsigned int frame_len
)
1359 unsigned int rx_csum
= word0
.bits
.chksum_status
;
1360 unsigned int rx_status
= word0
.bits
.status
;
1361 struct sk_buff
*skb
= NULL
;
1363 port
->rx_stats
[rx_status
]++;
1364 port
->rx_csum_stats
[rx_csum
]++;
1366 if (word0
.bits
.derr
|| word0
.bits
.perr
||
1367 rx_status
|| frame_len
< ETH_ZLEN
||
1368 rx_csum
>= RX_CHKSUM_IP_ERR_UNKNOWN
) {
1369 port
->stats
.rx_errors
++;
1371 if (frame_len
< ETH_ZLEN
|| RX_ERROR_LENGTH(rx_status
))
1372 port
->stats
.rx_length_errors
++;
1373 if (RX_ERROR_OVER(rx_status
))
1374 port
->stats
.rx_over_errors
++;
1375 if (RX_ERROR_CRC(rx_status
))
1376 port
->stats
.rx_crc_errors
++;
1377 if (RX_ERROR_FRAME(rx_status
))
1378 port
->stats
.rx_frame_errors
++;
1382 skb
= napi_get_frags(&port
->napi
);
1386 if (rx_csum
== RX_CHKSUM_IP_UDP_TCP_OK
)
1387 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1390 port
->stats
.rx_bytes
+= frame_len
;
1391 port
->stats
.rx_packets
++;
1395 static unsigned int gmac_rx(struct net_device
*netdev
, unsigned int budget
)
1397 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1398 unsigned short m
= (1 << port
->rxq_order
) - 1;
1399 struct gemini_ethernet
*geth
= port
->geth
;
1400 void __iomem
*ptr_reg
= port
->rxq_rwptr
;
1401 unsigned int frame_len
, frag_len
;
1402 struct gmac_rxdesc
*rx
= NULL
;
1403 struct gmac_queue_page
*gpage
;
1404 static struct sk_buff
*skb
;
1405 union gmac_rxdesc_0 word0
;
1406 union gmac_rxdesc_1 word1
;
1407 union gmac_rxdesc_3 word3
;
1408 struct page
*page
= NULL
;
1409 unsigned int page_offs
;
1410 unsigned short r
, w
;
1415 rw
.bits32
= readl(ptr_reg
);
1416 /* Reset interrupt as all packages until here are taken into account */
1417 writel(DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
,
1418 geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1422 while (budget
&& w
!= r
) {
1423 rx
= port
->rxq_ring
+ r
;
1426 mapping
= rx
->word2
.buf_adr
;
1432 frag_len
= word0
.bits
.buffer_size
;
1433 frame_len
= word1
.bits
.byte_count
;
1434 page_offs
= mapping
& ~PAGE_MASK
;
1438 "rxq[%u]: HW BUG: zero DMA desc\n", r
);
1442 /* Freeq pointers are one page off */
1443 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
1445 dev_err(geth
->dev
, "could not find mapping\n");
1450 if (word3
.bits32
& SOF_BIT
) {
1452 napi_free_frags(&port
->napi
);
1453 port
->stats
.rx_dropped
++;
1456 skb
= gmac_skb_if_good_frame(port
, word0
, frame_len
);
1460 page_offs
+= NET_IP_ALIGN
;
1461 frag_len
-= NET_IP_ALIGN
;
1469 if (word3
.bits32
& EOF_BIT
)
1470 frag_len
= frame_len
- skb
->len
;
1472 /* append page frag to skb */
1473 if (frag_nr
== MAX_SKB_FRAGS
)
1477 netdev_err(netdev
, "Received fragment with len = 0\n");
1479 skb_fill_page_desc(skb
, frag_nr
, page
, page_offs
, frag_len
);
1480 skb
->len
+= frag_len
;
1481 skb
->data_len
+= frag_len
;
1482 skb
->truesize
+= frag_len
;
1485 if (word3
.bits32
& EOF_BIT
) {
1486 napi_gro_frags(&port
->napi
);
1494 napi_free_frags(&port
->napi
);
1501 port
->stats
.rx_dropped
++;
1508 static int gmac_napi_poll(struct napi_struct
*napi
, int budget
)
1510 struct gemini_ethernet_port
*port
= netdev_priv(napi
->dev
);
1511 struct gemini_ethernet
*geth
= port
->geth
;
1512 unsigned int freeq_threshold
;
1513 unsigned int received
;
1515 freeq_threshold
= 1 << (geth
->freeq_order
- 1);
1516 u64_stats_update_begin(&port
->rx_stats_syncp
);
1518 received
= gmac_rx(napi
->dev
, budget
);
1519 if (received
< budget
) {
1520 napi_gro_flush(napi
, false);
1521 napi_complete_done(napi
, received
);
1522 gmac_enable_rx_irq(napi
->dev
, 1);
1523 ++port
->rx_napi_exits
;
1526 port
->freeq_refill
+= (budget
- received
);
1527 if (port
->freeq_refill
> freeq_threshold
) {
1528 port
->freeq_refill
-= freeq_threshold
;
1529 geth_fill_freeq(geth
, true);
1532 u64_stats_update_end(&port
->rx_stats_syncp
);
1536 static void gmac_dump_dma_state(struct net_device
*netdev
)
1538 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1539 struct gemini_ethernet
*geth
= port
->geth
;
1540 void __iomem
*ptr_reg
;
1543 /* Interrupt status */
1544 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1545 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1546 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
1547 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
1548 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1549 netdev_err(netdev
, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1550 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1552 /* Interrupt enable */
1553 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1554 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1555 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
1556 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
1557 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1558 netdev_err(netdev
, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1559 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1562 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_FIRST_DESC_REG
);
1563 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_CURR_DESC_REG
);
1564 reg
[2] = GET_RPTR(port
->rxq_rwptr
);
1565 reg
[3] = GET_WPTR(port
->rxq_rwptr
);
1566 netdev_err(netdev
, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1567 reg
[0], reg
[1], reg
[2], reg
[3]);
1569 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD0_REG
);
1570 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD1_REG
);
1571 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD2_REG
);
1572 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD3_REG
);
1573 netdev_err(netdev
, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1574 reg
[0], reg
[1], reg
[2], reg
[3]);
1577 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
1579 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_FIRST_DESC_REG
);
1580 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_CURR_DESC_REG
);
1581 reg
[2] = GET_RPTR(ptr_reg
);
1582 reg
[3] = GET_WPTR(ptr_reg
);
1583 netdev_err(netdev
, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1584 reg
[0], reg
[1], reg
[2], reg
[3]);
1586 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD0_REG
);
1587 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD1_REG
);
1588 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD2_REG
);
1589 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD3_REG
);
1590 netdev_err(netdev
, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1591 reg
[0], reg
[1], reg
[2], reg
[3]);
1593 /* FREE queues status */
1594 ptr_reg
= geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
;
1596 reg
[0] = GET_RPTR(ptr_reg
);
1597 reg
[1] = GET_WPTR(ptr_reg
);
1599 ptr_reg
= geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
;
1601 reg
[2] = GET_RPTR(ptr_reg
);
1602 reg
[3] = GET_WPTR(ptr_reg
);
1603 netdev_err(netdev
, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
1604 reg
[0], reg
[1], reg
[2], reg
[3]);
1607 static void gmac_update_hw_stats(struct net_device
*netdev
)
1609 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1610 unsigned int rx_discards
, rx_mcast
, rx_bcast
;
1611 struct gemini_ethernet
*geth
= port
->geth
;
1612 unsigned long flags
;
1614 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1615 u64_stats_update_begin(&port
->ir_stats_syncp
);
1617 rx_discards
= readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1618 port
->hw_stats
[0] += rx_discards
;
1619 port
->hw_stats
[1] += readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1620 rx_mcast
= readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1621 port
->hw_stats
[2] += rx_mcast
;
1622 rx_bcast
= readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1623 port
->hw_stats
[3] += rx_bcast
;
1624 port
->hw_stats
[4] += readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1625 port
->hw_stats
[5] += readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1627 port
->stats
.rx_missed_errors
+= rx_discards
;
1628 port
->stats
.multicast
+= rx_mcast
;
1629 port
->stats
.multicast
+= rx_bcast
;
1631 writel(GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8),
1632 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1634 u64_stats_update_end(&port
->ir_stats_syncp
);
1635 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1639 * gmac_get_intr_flags() - get interrupt status flags for a port from
1640 * @netdev: the net device for the port to get flags from
1641 * @i: the interrupt status register 0..4
1643 static u32
gmac_get_intr_flags(struct net_device
*netdev
, int i
)
1645 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1646 struct gemini_ethernet
*geth
= port
->geth
;
1647 void __iomem
*irqif_reg
, *irqen_reg
;
1648 unsigned int offs
, val
;
1650 /* Calculate the offset using the stride of the status registers */
1651 offs
= i
* (GLOBAL_INTERRUPT_STATUS_1_REG
-
1652 GLOBAL_INTERRUPT_STATUS_0_REG
);
1654 irqif_reg
= geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
+ offs
;
1655 irqen_reg
= geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
+ offs
;
1657 val
= readl(irqif_reg
) & readl(irqen_reg
);
1661 static enum hrtimer_restart
gmac_coalesce_delay_expired(struct hrtimer
*timer
)
1663 struct gemini_ethernet_port
*port
=
1664 container_of(timer
, struct gemini_ethernet_port
,
1667 napi_schedule(&port
->napi
);
1668 return HRTIMER_NORESTART
;
1671 static irqreturn_t
gmac_irq(int irq
, void *data
)
1673 struct gemini_ethernet_port
*port
;
1674 struct net_device
*netdev
= data
;
1675 struct gemini_ethernet
*geth
;
1678 port
= netdev_priv(netdev
);
1681 val
= gmac_get_intr_flags(netdev
, 0);
1684 if (val
& (GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2))) {
1686 netdev_err(netdev
, "hw failure/sw bug\n");
1687 gmac_dump_dma_state(netdev
);
1689 /* don't know how to recover, just reduce losses */
1690 gmac_enable_irq(netdev
, 0);
1694 if (val
& (GMAC0_IRQ0_TXQ0_INTS
<< (netdev
->dev_id
* 6)))
1695 gmac_tx_irq(netdev
, 0);
1697 val
= gmac_get_intr_flags(netdev
, 1);
1700 if (val
& (DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
)) {
1701 gmac_enable_rx_irq(netdev
, 0);
1703 if (!port
->rx_coalesce_nsecs
) {
1704 napi_schedule(&port
->napi
);
1708 ktime
= ktime_set(0, port
->rx_coalesce_nsecs
);
1709 hrtimer_start(&port
->rx_coalesce_timer
, ktime
,
1714 val
= gmac_get_intr_flags(netdev
, 4);
1717 if (val
& (GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8)))
1718 gmac_update_hw_stats(netdev
);
1720 if (val
& (GMAC0_RX_OVERRUN_INT_BIT
<< (netdev
->dev_id
* 8))) {
1721 writel(GMAC0_RXDERR_INT_BIT
<< (netdev
->dev_id
* 8),
1722 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1724 spin_lock(&geth
->irq_lock
);
1725 u64_stats_update_begin(&port
->ir_stats_syncp
);
1726 ++port
->stats
.rx_fifo_errors
;
1727 u64_stats_update_end(&port
->ir_stats_syncp
);
1728 spin_unlock(&geth
->irq_lock
);
1731 return orr
? IRQ_HANDLED
: IRQ_NONE
;
1734 static void gmac_start_dma(struct gemini_ethernet_port
*port
)
1736 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1737 union gmac_dma_ctrl dma_ctrl
;
1739 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1740 dma_ctrl
.bits
.rd_enable
= 1;
1741 dma_ctrl
.bits
.td_enable
= 1;
1742 dma_ctrl
.bits
.loopback
= 0;
1743 dma_ctrl
.bits
.drop_small_ack
= 0;
1744 dma_ctrl
.bits
.rd_insert_bytes
= NET_IP_ALIGN
;
1745 dma_ctrl
.bits
.rd_prot
= HPROT_DATA_CACHE
| HPROT_PRIVILIGED
;
1746 dma_ctrl
.bits
.rd_burst_size
= HBURST_INCR8
;
1747 dma_ctrl
.bits
.rd_bus
= HSIZE_8
;
1748 dma_ctrl
.bits
.td_prot
= HPROT_DATA_CACHE
;
1749 dma_ctrl
.bits
.td_burst_size
= HBURST_INCR8
;
1750 dma_ctrl
.bits
.td_bus
= HSIZE_8
;
1752 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1755 static void gmac_stop_dma(struct gemini_ethernet_port
*port
)
1757 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1758 union gmac_dma_ctrl dma_ctrl
;
1760 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1761 dma_ctrl
.bits
.rd_enable
= 0;
1762 dma_ctrl
.bits
.td_enable
= 0;
1763 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1766 static int gmac_open(struct net_device
*netdev
)
1768 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1771 if (!netdev
->phydev
) {
1772 err
= gmac_setup_phy(netdev
);
1774 netif_err(port
, ifup
, netdev
,
1775 "PHY init failed: %d\n", err
);
1780 err
= request_irq(netdev
->irq
, gmac_irq
,
1781 IRQF_SHARED
, netdev
->name
, netdev
);
1783 netdev_err(netdev
, "no IRQ\n");
1787 netif_carrier_off(netdev
);
1788 phy_start(netdev
->phydev
);
1790 err
= geth_resize_freeq(port
);
1791 /* It's fine if it's just busy, the other port has set up
1792 * the freeq in that case.
1794 if (err
&& (err
!= -EBUSY
)) {
1795 netdev_err(netdev
, "could not resize freeq\n");
1799 err
= gmac_setup_rxq(netdev
);
1801 netdev_err(netdev
, "could not setup RXQ\n");
1805 err
= gmac_setup_txqs(netdev
);
1807 netdev_err(netdev
, "could not setup TXQs\n");
1808 gmac_cleanup_rxq(netdev
);
1812 napi_enable(&port
->napi
);
1814 gmac_start_dma(port
);
1815 gmac_enable_irq(netdev
, 1);
1816 gmac_enable_tx_rx(netdev
);
1817 netif_tx_start_all_queues(netdev
);
1819 hrtimer_init(&port
->rx_coalesce_timer
, CLOCK_MONOTONIC
,
1821 port
->rx_coalesce_timer
.function
= &gmac_coalesce_delay_expired
;
1823 netdev_dbg(netdev
, "opened\n");
1828 phy_stop(netdev
->phydev
);
1829 free_irq(netdev
->irq
, netdev
);
1833 static int gmac_stop(struct net_device
*netdev
)
1835 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1837 hrtimer_cancel(&port
->rx_coalesce_timer
);
1838 netif_tx_stop_all_queues(netdev
);
1839 gmac_disable_tx_rx(netdev
);
1840 gmac_stop_dma(port
);
1841 napi_disable(&port
->napi
);
1843 gmac_enable_irq(netdev
, 0);
1844 gmac_cleanup_rxq(netdev
);
1845 gmac_cleanup_txqs(netdev
);
1847 phy_stop(netdev
->phydev
);
1848 free_irq(netdev
->irq
, netdev
);
1850 gmac_update_hw_stats(netdev
);
1854 static void gmac_set_rx_mode(struct net_device
*netdev
)
1856 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1857 union gmac_rx_fltr filter
= { .bits
= {
1862 struct netdev_hw_addr
*ha
;
1863 unsigned int bit_nr
;
1869 if (netdev
->flags
& IFF_PROMISC
) {
1870 filter
.bits
.error
= 1;
1871 filter
.bits
.promiscuous
= 1;
1874 } else if (netdev
->flags
& IFF_ALLMULTI
) {
1878 netdev_for_each_mc_addr(ha
, netdev
) {
1879 bit_nr
= ~crc32_le(~0, ha
->addr
, ETH_ALEN
) & 0x3f;
1880 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 0x1f);
1884 writel(mc_filter
[0], port
->gmac_base
+ GMAC_MCAST_FIL0
);
1885 writel(mc_filter
[1], port
->gmac_base
+ GMAC_MCAST_FIL1
);
1886 writel(filter
.bits32
, port
->gmac_base
+ GMAC_RX_FLTR
);
1889 static void gmac_write_mac_address(struct net_device
*netdev
)
1891 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1894 memset(addr
, 0, sizeof(addr
));
1895 memcpy(addr
, netdev
->dev_addr
, ETH_ALEN
);
1897 writel(le32_to_cpu(addr
[0]), port
->gmac_base
+ GMAC_STA_ADD0
);
1898 writel(le32_to_cpu(addr
[1]), port
->gmac_base
+ GMAC_STA_ADD1
);
1899 writel(le32_to_cpu(addr
[2]), port
->gmac_base
+ GMAC_STA_ADD2
);
1902 static int gmac_set_mac_address(struct net_device
*netdev
, void *addr
)
1904 struct sockaddr
*sa
= addr
;
1906 memcpy(netdev
->dev_addr
, sa
->sa_data
, ETH_ALEN
);
1907 gmac_write_mac_address(netdev
);
1912 static void gmac_clear_hw_stats(struct net_device
*netdev
)
1914 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1916 readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1917 readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1918 readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1919 readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1920 readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1921 readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1924 static void gmac_get_stats64(struct net_device
*netdev
,
1925 struct rtnl_link_stats64
*stats
)
1927 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1930 gmac_update_hw_stats(netdev
);
1932 /* Racing with RX NAPI */
1934 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
1936 stats
->rx_packets
= port
->stats
.rx_packets
;
1937 stats
->rx_bytes
= port
->stats
.rx_bytes
;
1938 stats
->rx_errors
= port
->stats
.rx_errors
;
1939 stats
->rx_dropped
= port
->stats
.rx_dropped
;
1941 stats
->rx_length_errors
= port
->stats
.rx_length_errors
;
1942 stats
->rx_over_errors
= port
->stats
.rx_over_errors
;
1943 stats
->rx_crc_errors
= port
->stats
.rx_crc_errors
;
1944 stats
->rx_frame_errors
= port
->stats
.rx_frame_errors
;
1946 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
1948 /* Racing with MIB and TX completion interrupts */
1950 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
1952 stats
->tx_errors
= port
->stats
.tx_errors
;
1953 stats
->tx_packets
= port
->stats
.tx_packets
;
1954 stats
->tx_bytes
= port
->stats
.tx_bytes
;
1956 stats
->multicast
= port
->stats
.multicast
;
1957 stats
->rx_missed_errors
= port
->stats
.rx_missed_errors
;
1958 stats
->rx_fifo_errors
= port
->stats
.rx_fifo_errors
;
1960 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
1962 /* Racing with hard_start_xmit */
1964 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
1966 stats
->tx_dropped
= port
->stats
.tx_dropped
;
1968 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
1970 stats
->rx_dropped
+= stats
->rx_missed_errors
;
1973 static int gmac_change_mtu(struct net_device
*netdev
, int new_mtu
)
1975 int max_len
= gmac_pick_rx_max_len(new_mtu
);
1980 gmac_disable_tx_rx(netdev
);
1982 netdev
->mtu
= new_mtu
;
1983 gmac_update_config0_reg(netdev
, max_len
<< CONFIG0_MAXLEN_SHIFT
,
1984 CONFIG0_MAXLEN_MASK
);
1986 netdev_update_features(netdev
);
1988 gmac_enable_tx_rx(netdev
);
1993 static netdev_features_t
gmac_fix_features(struct net_device
*netdev
,
1994 netdev_features_t features
)
1996 if (netdev
->mtu
+ ETH_HLEN
+ VLAN_HLEN
> MTU_SIZE_BIT_MASK
)
1997 features
&= ~GMAC_OFFLOAD_FEATURES
;
2002 static int gmac_set_features(struct net_device
*netdev
,
2003 netdev_features_t features
)
2005 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2006 int enable
= features
& NETIF_F_RXCSUM
;
2007 unsigned long flags
;
2010 spin_lock_irqsave(&port
->config_lock
, flags
);
2012 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2013 reg
= enable
? reg
| CONFIG0_RX_CHKSUM
: reg
& ~CONFIG0_RX_CHKSUM
;
2014 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
2016 spin_unlock_irqrestore(&port
->config_lock
, flags
);
2020 static int gmac_get_sset_count(struct net_device
*netdev
, int sset
)
2022 return sset
== ETH_SS_STATS
? GMAC_STATS_NUM
: 0;
2025 static void gmac_get_strings(struct net_device
*netdev
, u32 stringset
, u8
*data
)
2027 if (stringset
!= ETH_SS_STATS
)
2030 memcpy(data
, gmac_stats_strings
, sizeof(gmac_stats_strings
));
2033 static void gmac_get_ethtool_stats(struct net_device
*netdev
,
2034 struct ethtool_stats
*estats
, u64
*values
)
2036 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2041 gmac_update_hw_stats(netdev
);
2043 /* Racing with MIB interrupt */
2046 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
2048 for (i
= 0; i
< RX_STATS_NUM
; i
++)
2049 *p
++ = port
->hw_stats
[i
];
2051 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
2054 /* Racing with RX NAPI */
2057 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
2059 for (i
= 0; i
< RX_STATUS_NUM
; i
++)
2060 *p
++ = port
->rx_stats
[i
];
2061 for (i
= 0; i
< RX_CHKSUM_NUM
; i
++)
2062 *p
++ = port
->rx_csum_stats
[i
];
2063 *p
++ = port
->rx_napi_exits
;
2065 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
2068 /* Racing with TX start_xmit */
2071 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
2073 for (i
= 0; i
< TX_MAX_FRAGS
; i
++) {
2074 *values
++ = port
->tx_frag_stats
[i
];
2075 port
->tx_frag_stats
[i
] = 0;
2077 *values
++ = port
->tx_frags_linearized
;
2078 *values
++ = port
->tx_hw_csummed
;
2080 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
2083 static int gmac_get_ksettings(struct net_device
*netdev
,
2084 struct ethtool_link_ksettings
*cmd
)
2086 if (!netdev
->phydev
)
2088 phy_ethtool_ksettings_get(netdev
->phydev
, cmd
);
2093 static int gmac_set_ksettings(struct net_device
*netdev
,
2094 const struct ethtool_link_ksettings
*cmd
)
2096 if (!netdev
->phydev
)
2098 return phy_ethtool_ksettings_set(netdev
->phydev
, cmd
);
2101 static int gmac_nway_reset(struct net_device
*netdev
)
2103 if (!netdev
->phydev
)
2105 return phy_start_aneg(netdev
->phydev
);
2108 static void gmac_get_pauseparam(struct net_device
*netdev
,
2109 struct ethtool_pauseparam
*pparam
)
2111 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2112 union gmac_config0 config0
;
2114 config0
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2116 pparam
->rx_pause
= config0
.bits
.rx_fc_en
;
2117 pparam
->tx_pause
= config0
.bits
.tx_fc_en
;
2118 pparam
->autoneg
= true;
2121 static void gmac_get_ringparam(struct net_device
*netdev
,
2122 struct ethtool_ringparam
*rp
)
2124 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2125 union gmac_config0 config0
;
2127 config0
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2129 rp
->rx_max_pending
= 1 << 15;
2130 rp
->rx_mini_max_pending
= 0;
2131 rp
->rx_jumbo_max_pending
= 0;
2132 rp
->tx_max_pending
= 1 << 15;
2134 rp
->rx_pending
= 1 << port
->rxq_order
;
2135 rp
->rx_mini_pending
= 0;
2136 rp
->rx_jumbo_pending
= 0;
2137 rp
->tx_pending
= 1 << port
->txq_order
;
2140 static int gmac_set_ringparam(struct net_device
*netdev
,
2141 struct ethtool_ringparam
*rp
)
2143 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2146 if (netif_running(netdev
))
2149 if (rp
->rx_pending
) {
2150 port
->rxq_order
= min(15, ilog2(rp
->rx_pending
- 1) + 1);
2151 err
= geth_resize_freeq(port
);
2153 if (rp
->tx_pending
) {
2154 port
->txq_order
= min(15, ilog2(rp
->tx_pending
- 1) + 1);
2155 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
2161 static int gmac_get_coalesce(struct net_device
*netdev
,
2162 struct ethtool_coalesce
*ecmd
)
2164 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2166 ecmd
->rx_max_coalesced_frames
= 1;
2167 ecmd
->tx_max_coalesced_frames
= port
->irq_every_tx_packets
;
2168 ecmd
->rx_coalesce_usecs
= port
->rx_coalesce_nsecs
/ 1000;
2173 static int gmac_set_coalesce(struct net_device
*netdev
,
2174 struct ethtool_coalesce
*ecmd
)
2176 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2178 if (ecmd
->tx_max_coalesced_frames
< 1)
2180 if (ecmd
->tx_max_coalesced_frames
>= 1 << port
->txq_order
)
2183 port
->irq_every_tx_packets
= ecmd
->tx_max_coalesced_frames
;
2184 port
->rx_coalesce_nsecs
= ecmd
->rx_coalesce_usecs
* 1000;
2189 static u32
gmac_get_msglevel(struct net_device
*netdev
)
2191 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2193 return port
->msg_enable
;
2196 static void gmac_set_msglevel(struct net_device
*netdev
, u32 level
)
2198 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2200 port
->msg_enable
= level
;
2203 static void gmac_get_drvinfo(struct net_device
*netdev
,
2204 struct ethtool_drvinfo
*info
)
2206 strcpy(info
->driver
, DRV_NAME
);
2207 strcpy(info
->version
, DRV_VERSION
);
2208 strcpy(info
->bus_info
, netdev
->dev_id
? "1" : "0");
2211 static const struct net_device_ops gmac_351x_ops
= {
2212 .ndo_init
= gmac_init
,
2213 .ndo_uninit
= gmac_uninit
,
2214 .ndo_open
= gmac_open
,
2215 .ndo_stop
= gmac_stop
,
2216 .ndo_start_xmit
= gmac_start_xmit
,
2217 .ndo_tx_timeout
= gmac_tx_timeout
,
2218 .ndo_set_rx_mode
= gmac_set_rx_mode
,
2219 .ndo_set_mac_address
= gmac_set_mac_address
,
2220 .ndo_get_stats64
= gmac_get_stats64
,
2221 .ndo_change_mtu
= gmac_change_mtu
,
2222 .ndo_fix_features
= gmac_fix_features
,
2223 .ndo_set_features
= gmac_set_features
,
2226 static const struct ethtool_ops gmac_351x_ethtool_ops
= {
2227 .get_sset_count
= gmac_get_sset_count
,
2228 .get_strings
= gmac_get_strings
,
2229 .get_ethtool_stats
= gmac_get_ethtool_stats
,
2230 .get_link
= ethtool_op_get_link
,
2231 .get_link_ksettings
= gmac_get_ksettings
,
2232 .set_link_ksettings
= gmac_set_ksettings
,
2233 .nway_reset
= gmac_nway_reset
,
2234 .get_pauseparam
= gmac_get_pauseparam
,
2235 .get_ringparam
= gmac_get_ringparam
,
2236 .set_ringparam
= gmac_set_ringparam
,
2237 .get_coalesce
= gmac_get_coalesce
,
2238 .set_coalesce
= gmac_set_coalesce
,
2239 .get_msglevel
= gmac_get_msglevel
,
2240 .set_msglevel
= gmac_set_msglevel
,
2241 .get_drvinfo
= gmac_get_drvinfo
,
2244 static irqreturn_t
gemini_port_irq_thread(int irq
, void *data
)
2246 unsigned long irqmask
= SWFQ_EMPTY_INT_BIT
;
2247 struct gemini_ethernet_port
*port
= data
;
2248 struct gemini_ethernet
*geth
;
2249 unsigned long flags
;
2252 /* The queue is half empty so refill it */
2253 geth_fill_freeq(geth
, true);
2255 spin_lock_irqsave(&geth
->irq_lock
, flags
);
2256 /* ACK queue interrupt */
2257 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2258 /* Enable queue interrupt again */
2259 irqmask
|= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2260 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2261 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
2266 static irqreturn_t
gemini_port_irq(int irq
, void *data
)
2268 struct gemini_ethernet_port
*port
= data
;
2269 struct gemini_ethernet
*geth
;
2270 irqreturn_t ret
= IRQ_NONE
;
2274 spin_lock(&geth
->irq_lock
);
2276 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2277 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2279 if (val
& en
& SWFQ_EMPTY_INT_BIT
) {
2280 /* Disable the queue empty interrupt while we work on
2281 * processing the queue. Also disable overrun interrupts
2282 * as there is not much we can do about it here.
2284 en
&= ~(SWFQ_EMPTY_INT_BIT
| GMAC0_RX_OVERRUN_INT_BIT
2285 | GMAC1_RX_OVERRUN_INT_BIT
);
2286 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2287 ret
= IRQ_WAKE_THREAD
;
2290 spin_unlock(&geth
->irq_lock
);
2295 static void gemini_port_remove(struct gemini_ethernet_port
*port
)
2298 unregister_netdev(port
->netdev
);
2299 clk_disable_unprepare(port
->pclk
);
2300 geth_cleanup_freeq(port
->geth
);
2303 static void gemini_ethernet_init(struct gemini_ethernet
*geth
)
2305 /* Only do this once both ports are online */
2306 if (geth
->initialized
)
2308 if (geth
->port0
&& geth
->port1
)
2309 geth
->initialized
= true;
2313 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
2314 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
2315 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
2316 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
2317 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2319 /* Interrupt config:
2321 * GMAC0 intr bits ------> int0 ----> eth0
2322 * GMAC1 intr bits ------> int1 ----> eth1
2323 * TOE intr -------------> int1 ----> eth1
2324 * Classification Intr --> int0 ----> eth0
2325 * Default Q0 -----------> int0 ----> eth0
2326 * Default Q1 -----------> int1 ----> eth1
2327 * FreeQ intr -----------> int1 ----> eth1
2329 writel(0xCCFC0FC0, geth
->base
+ GLOBAL_INTERRUPT_SELECT_0_REG
);
2330 writel(0x00F00002, geth
->base
+ GLOBAL_INTERRUPT_SELECT_1_REG
);
2331 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_2_REG
);
2332 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_3_REG
);
2333 writel(0xFF000003, geth
->base
+ GLOBAL_INTERRUPT_SELECT_4_REG
);
2335 /* edge-triggered interrupts packed to level-triggered one... */
2336 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
2337 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
2338 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
2339 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
2340 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2343 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
2344 writel(0, geth
->base
+ GLOBAL_HW_FREEQ_BASE_SIZE_REG
);
2345 writel(0, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
2346 writel(0, geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
);
2348 geth
->freeq_frag_order
= DEFAULT_RX_BUF_ORDER
;
2349 /* This makes the queue resize on probe() so that we
2350 * set up and enable the queue IRQ. FIXME: fragile.
2352 geth
->freeq_order
= 1;
2355 static void gemini_port_save_mac_addr(struct gemini_ethernet_port
*port
)
2358 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD0
));
2360 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD1
));
2362 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD2
));
2365 static int gemini_ethernet_port_probe(struct platform_device
*pdev
)
2367 char *port_names
[2] = { "ethernet0", "ethernet1" };
2368 struct gemini_ethernet_port
*port
;
2369 struct device
*dev
= &pdev
->dev
;
2370 struct gemini_ethernet
*geth
;
2371 struct net_device
*netdev
;
2372 struct resource
*gmacres
;
2373 struct resource
*dmares
;
2374 struct device
*parent
;
2379 parent
= dev
->parent
;
2380 geth
= dev_get_drvdata(parent
);
2382 if (!strcmp(dev_name(dev
), "60008000.ethernet-port"))
2384 else if (!strcmp(dev_name(dev
), "6000c000.ethernet-port"))
2389 dev_info(dev
, "probe %s ID %d\n", dev_name(dev
), id
);
2391 netdev
= alloc_etherdev_mq(sizeof(*port
), TX_QUEUE_NUM
);
2393 dev_err(dev
, "Can't allocate ethernet device #%d\n", id
);
2397 port
= netdev_priv(netdev
);
2398 SET_NETDEV_DEV(netdev
, dev
);
2399 port
->netdev
= netdev
;
2403 port
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
2406 dmares
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2408 dev_err(dev
, "no DMA resource\n");
2411 port
->dma_base
= devm_ioremap_resource(dev
, dmares
);
2412 if (IS_ERR(port
->dma_base
))
2413 return PTR_ERR(port
->dma_base
);
2415 /* GMAC config memory */
2416 gmacres
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2418 dev_err(dev
, "no GMAC resource\n");
2421 port
->gmac_base
= devm_ioremap_resource(dev
, gmacres
);
2422 if (IS_ERR(port
->gmac_base
))
2423 return PTR_ERR(port
->gmac_base
);
2426 irq
= platform_get_irq(pdev
, 0);
2428 return irq
? irq
: -ENODEV
;
2431 /* Clock the port */
2432 port
->pclk
= devm_clk_get(dev
, "PCLK");
2433 if (IS_ERR(port
->pclk
)) {
2434 dev_err(dev
, "no PCLK\n");
2435 return PTR_ERR(port
->pclk
);
2437 ret
= clk_prepare_enable(port
->pclk
);
2441 /* Maybe there is a nice ethernet address we should use */
2442 gemini_port_save_mac_addr(port
);
2444 /* Reset the port */
2445 port
->reset
= devm_reset_control_get_exclusive(dev
, NULL
);
2446 if (IS_ERR(port
->reset
)) {
2447 dev_err(dev
, "no reset\n");
2448 return PTR_ERR(port
->reset
);
2450 reset_control_reset(port
->reset
);
2451 usleep_range(100, 500);
2453 /* Assign pointer in the main state container */
2459 /* This will just be done once both ports are up and reset */
2460 gemini_ethernet_init(geth
);
2462 platform_set_drvdata(pdev
, port
);
2464 /* Set up and register the netdev */
2465 netdev
->dev_id
= port
->id
;
2467 netdev
->netdev_ops
= &gmac_351x_ops
;
2468 netdev
->ethtool_ops
= &gmac_351x_ethtool_ops
;
2470 spin_lock_init(&port
->config_lock
);
2471 gmac_clear_hw_stats(netdev
);
2473 netdev
->hw_features
= GMAC_OFFLOAD_FEATURES
;
2474 netdev
->features
|= GMAC_OFFLOAD_FEATURES
| NETIF_F_GRO
;
2475 /* We can handle jumbo frames up to 10236 bytes so, let's accept
2476 * payloads of 10236 bytes minus VLAN and ethernet header
2478 netdev
->min_mtu
= ETH_MIN_MTU
;
2479 netdev
->max_mtu
= 10236 - VLAN_ETH_HLEN
;
2481 port
->freeq_refill
= 0;
2482 netif_napi_add(netdev
, &port
->napi
, gmac_napi_poll
,
2483 DEFAULT_NAPI_WEIGHT
);
2485 if (is_valid_ether_addr((void *)port
->mac_addr
)) {
2486 memcpy(netdev
->dev_addr
, port
->mac_addr
, ETH_ALEN
);
2488 dev_dbg(dev
, "ethernet address 0x%08x%08x%08x invalid\n",
2489 port
->mac_addr
[0], port
->mac_addr
[1],
2491 dev_info(dev
, "using a random ethernet address\n");
2492 eth_random_addr(netdev
->dev_addr
);
2494 gmac_write_mac_address(netdev
);
2496 ret
= devm_request_threaded_irq(port
->dev
,
2499 gemini_port_irq_thread
,
2501 port_names
[port
->id
],
2506 ret
= register_netdev(netdev
);
2509 "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
2510 port
->irq
, &dmares
->start
,
2512 ret
= gmac_setup_phy(netdev
);
2515 "PHY init failed, deferring to ifup time\n");
2519 port
->netdev
= NULL
;
2520 free_netdev(netdev
);
2524 static int gemini_ethernet_port_remove(struct platform_device
*pdev
)
2526 struct gemini_ethernet_port
*port
= platform_get_drvdata(pdev
);
2528 gemini_port_remove(port
);
2529 free_netdev(port
->netdev
);
2533 static const struct of_device_id gemini_ethernet_port_of_match
[] = {
2535 .compatible
= "cortina,gemini-ethernet-port",
2539 MODULE_DEVICE_TABLE(of
, gemini_ethernet_port_of_match
);
2541 static struct platform_driver gemini_ethernet_port_driver
= {
2543 .name
= "gemini-ethernet-port",
2544 .of_match_table
= of_match_ptr(gemini_ethernet_port_of_match
),
2546 .probe
= gemini_ethernet_port_probe
,
2547 .remove
= gemini_ethernet_port_remove
,
2550 static int gemini_ethernet_probe(struct platform_device
*pdev
)
2552 struct device
*dev
= &pdev
->dev
;
2553 struct gemini_ethernet
*geth
;
2554 unsigned int retry
= 5;
2555 struct resource
*res
;
2558 /* Global registers */
2559 geth
= devm_kzalloc(dev
, sizeof(*geth
), GFP_KERNEL
);
2562 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2565 geth
->base
= devm_ioremap_resource(dev
, res
);
2566 if (IS_ERR(geth
->base
))
2567 return PTR_ERR(geth
->base
);
2570 /* Wait for ports to stabilize */
2573 val
= readl(geth
->base
+ GLOBAL_TOE_VERSION_REG
);
2575 } while (!val
&& --retry
);
2577 dev_err(dev
, "failed to reset ethernet\n");
2580 dev_info(dev
, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
2581 (val
>> 4) & 0xFFFU
, val
& 0xFU
);
2583 spin_lock_init(&geth
->irq_lock
);
2584 spin_lock_init(&geth
->freeq_lock
);
2586 /* The children will use this */
2587 platform_set_drvdata(pdev
, geth
);
2589 /* Spawn child devices for the two ports */
2590 return devm_of_platform_populate(dev
);
2593 static int gemini_ethernet_remove(struct platform_device
*pdev
)
2595 struct gemini_ethernet
*geth
= platform_get_drvdata(pdev
);
2597 geth_cleanup_freeq(geth
);
2598 geth
->initialized
= false;
2603 static const struct of_device_id gemini_ethernet_of_match
[] = {
2605 .compatible
= "cortina,gemini-ethernet",
2609 MODULE_DEVICE_TABLE(of
, gemini_ethernet_of_match
);
2611 static struct platform_driver gemini_ethernet_driver
= {
2614 .of_match_table
= of_match_ptr(gemini_ethernet_of_match
),
2616 .probe
= gemini_ethernet_probe
,
2617 .remove
= gemini_ethernet_remove
,
2620 static int __init
gemini_ethernet_module_init(void)
2624 ret
= platform_driver_register(&gemini_ethernet_port_driver
);
2628 ret
= platform_driver_register(&gemini_ethernet_driver
);
2630 platform_driver_unregister(&gemini_ethernet_port_driver
);
2636 module_init(gemini_ethernet_module_init
);
2638 static void __exit
gemini_ethernet_module_exit(void)
2640 platform_driver_unregister(&gemini_ethernet_driver
);
2641 platform_driver_unregister(&gemini_ethernet_port_driver
);
2643 module_exit(gemini_ethernet_module_exit
);
2645 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
2646 MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
2647 MODULE_LICENSE("GPL");
2648 MODULE_ALIAS("platform:" DRV_NAME
);