1 // SPDX-License-Identifier: GPL-2.0
2 /* Ethernet device driver for Cortina Systems Gemini SoC
3 * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
4 * Net Engine and Gigabit Ethernet MAC (GMAC)
5 * This hardware contains a TCP Offload Engine (TOE) but currently the
6 * driver does not make use of it.
9 * Linus Walleij <linus.walleij@linaro.org>
10 * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
11 * Michał Mirosław <mirq-linux@rere.qmqm.pl>
12 * Paulius Zaleckas <paulius.zaleckas@gmail.com>
13 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
14 * Gary Chen & Ch Hsu Storlink Semiconductor
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/spinlock.h>
21 #include <linux/slab.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/cache.h>
24 #include <linux/interrupt.h>
25 #include <linux/reset.h>
26 #include <linux/clk.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/of_platform.h>
31 #include <linux/etherdevice.h>
32 #include <linux/if_vlan.h>
33 #include <linux/skbuff.h>
34 #include <linux/phy.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/tcp.h>
38 #include <linux/u64_stats_sync.h>
42 #include <linux/ipv6.h>
46 #define DRV_NAME "gmac-gemini"
47 #define DRV_VERSION "1.0"
49 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
50 static int debug
= -1;
51 module_param(debug
, int, 0);
52 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
58 #define HBURST_SINGLE 0x00
59 #define HBURST_INCR 0x01
60 #define HBURST_INCR4 0x02
61 #define HBURST_INCR8 0x03
63 #define HPROT_DATA_CACHE BIT(0)
64 #define HPROT_PRIVILIGED BIT(1)
65 #define HPROT_BUFFERABLE BIT(2)
66 #define HPROT_CACHABLE BIT(3)
68 #define DEFAULT_RX_COALESCE_NSECS 0
69 #define DEFAULT_GMAC_RXQ_ORDER 9
70 #define DEFAULT_GMAC_TXQ_ORDER 8
71 #define DEFAULT_RX_BUF_ORDER 11
72 #define DEFAULT_NAPI_WEIGHT 64
73 #define TX_MAX_FRAGS 16
74 #define TX_QUEUE_NUM 1 /* max: 6 */
75 #define RX_MAX_ALLOC_ORDER 2
77 #define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
78 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
79 #define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
80 GMAC0_SWTQ00_FIN_INT_BIT)
81 #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
83 #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
84 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
85 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
88 * struct gmac_queue_page - page buffer per-page info
90 struct gmac_queue_page
{
96 struct gmac_txdesc
*ring
;
99 unsigned int noirq_packets
;
102 struct gemini_ethernet
;
104 struct gemini_ethernet_port
{
107 struct gemini_ethernet
*geth
;
108 struct net_device
*netdev
;
110 void __iomem
*dma_base
;
111 void __iomem
*gmac_base
;
113 struct reset_control
*reset
;
117 void __iomem
*rxq_rwptr
;
118 struct gmac_rxdesc
*rxq_ring
;
119 unsigned int rxq_order
;
121 struct napi_struct napi
;
122 struct hrtimer rx_coalesce_timer
;
123 unsigned int rx_coalesce_nsecs
;
124 unsigned int freeq_refill
;
125 struct gmac_txq txq
[TX_QUEUE_NUM
];
126 unsigned int txq_order
;
127 unsigned int irq_every_tx_packets
;
129 dma_addr_t rxq_dma_base
;
130 dma_addr_t txq_dma_base
;
132 unsigned int msg_enable
;
133 spinlock_t config_lock
; /* Locks config register */
135 struct u64_stats_sync tx_stats_syncp
;
136 struct u64_stats_sync rx_stats_syncp
;
137 struct u64_stats_sync ir_stats_syncp
;
139 struct rtnl_link_stats64 stats
;
140 u64 hw_stats
[RX_STATS_NUM
];
141 u64 rx_stats
[RX_STATUS_NUM
];
142 u64 rx_csum_stats
[RX_CHKSUM_NUM
];
144 u64 tx_frag_stats
[TX_MAX_FRAGS
];
145 u64 tx_frags_linearized
;
149 struct gemini_ethernet
{
152 struct gemini_ethernet_port
*port0
;
153 struct gemini_ethernet_port
*port1
;
156 spinlock_t irq_lock
; /* Locks IRQ-related registers */
157 unsigned int freeq_order
;
158 unsigned int freeq_frag_order
;
159 struct gmac_rxdesc
*freeq_ring
;
160 dma_addr_t freeq_dma_base
;
161 struct gmac_queue_page
*freeq_pages
;
162 unsigned int num_freeq_pages
;
163 spinlock_t freeq_lock
; /* Locks queue from reentrance */
166 #define GMAC_STATS_NUM ( \
167 RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
170 static const char gmac_stats_strings
[GMAC_STATS_NUM
][ETH_GSTRING_LEN
] = {
177 "RX_STATUS_GOOD_FRAME",
178 "RX_STATUS_TOO_LONG_GOOD_CRC",
179 "RX_STATUS_RUNT_FRAME",
180 "RX_STATUS_SFD_NOT_FOUND",
181 "RX_STATUS_CRC_ERROR",
182 "RX_STATUS_TOO_LONG_BAD_CRC",
183 "RX_STATUS_ALIGNMENT_ERROR",
184 "RX_STATUS_TOO_LONG_BAD_ALIGN",
186 "RX_STATUS_DA_FILTERED",
187 "RX_STATUS_BUFFER_FULL",
193 "RX_CHKSUM_IP_UDP_TCP_OK",
194 "RX_CHKSUM_IP_OK_ONLY",
197 "RX_CHKSUM_IP_ERR_UNKNOWN",
199 "RX_CHKSUM_TCP_UDP_ERR",
218 "TX_FRAGS_LINEARIZED",
222 static void gmac_dump_dma_state(struct net_device
*netdev
);
224 static void gmac_update_config0_reg(struct net_device
*netdev
,
227 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
231 spin_lock_irqsave(&port
->config_lock
, flags
);
233 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
234 reg
= (reg
& ~vmask
) | val
;
235 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
237 spin_unlock_irqrestore(&port
->config_lock
, flags
);
240 static void gmac_enable_tx_rx(struct net_device
*netdev
)
242 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
246 spin_lock_irqsave(&port
->config_lock
, flags
);
248 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
249 reg
&= ~CONFIG0_TX_RX_DISABLE
;
250 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
252 spin_unlock_irqrestore(&port
->config_lock
, flags
);
255 static void gmac_disable_tx_rx(struct net_device
*netdev
)
257 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
261 spin_lock_irqsave(&port
->config_lock
, flags
);
263 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
264 val
|= CONFIG0_TX_RX_DISABLE
;
265 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
267 spin_unlock_irqrestore(&port
->config_lock
, flags
);
269 mdelay(10); /* let GMAC consume packet */
272 static void gmac_set_flow_control(struct net_device
*netdev
, bool tx
, bool rx
)
274 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
278 spin_lock_irqsave(&port
->config_lock
, flags
);
280 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
281 val
&= ~CONFIG0_FLOW_CTL
;
283 val
|= CONFIG0_FLOW_TX
;
285 val
|= CONFIG0_FLOW_RX
;
286 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
288 spin_unlock_irqrestore(&port
->config_lock
, flags
);
291 static void gmac_speed_set(struct net_device
*netdev
)
293 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
294 struct phy_device
*phydev
= netdev
->phydev
;
295 union gmac_status status
, old_status
;
299 status
.bits32
= readl(port
->gmac_base
+ GMAC_STATUS
);
300 old_status
.bits32
= status
.bits32
;
301 status
.bits
.link
= phydev
->link
;
302 status
.bits
.duplex
= phydev
->duplex
;
304 switch (phydev
->speed
) {
306 status
.bits
.speed
= GMAC_SPEED_1000
;
307 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
308 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_1000
;
309 netdev_dbg(netdev
, "connect %s to RGMII @ 1Gbit\n",
310 phydev_name(phydev
));
313 status
.bits
.speed
= GMAC_SPEED_100
;
314 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
315 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
316 netdev_dbg(netdev
, "connect %s to RGMII @ 100 Mbit\n",
317 phydev_name(phydev
));
320 status
.bits
.speed
= GMAC_SPEED_10
;
321 if (phydev
->interface
== PHY_INTERFACE_MODE_RGMII
)
322 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
323 netdev_dbg(netdev
, "connect %s to RGMII @ 10 Mbit\n",
324 phydev_name(phydev
));
327 netdev_warn(netdev
, "Unsupported PHY speed (%d) on %s\n",
328 phydev
->speed
, phydev_name(phydev
));
331 if (phydev
->duplex
== DUPLEX_FULL
) {
332 u16 lcladv
= phy_read(phydev
, MII_ADVERTISE
);
333 u16 rmtadv
= phy_read(phydev
, MII_LPA
);
334 u8 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
336 if (cap
& FLOW_CTRL_RX
)
338 if (cap
& FLOW_CTRL_TX
)
342 gmac_set_flow_control(netdev
, pause_tx
, pause_rx
);
344 if (old_status
.bits32
== status
.bits32
)
347 if (netif_msg_link(port
)) {
348 phy_print_status(phydev
);
349 netdev_info(netdev
, "link flow control: %s\n",
351 ? (phydev
->asym_pause
? "tx" : "both")
352 : (phydev
->asym_pause
? "rx" : "none")
356 gmac_disable_tx_rx(netdev
);
357 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
358 gmac_enable_tx_rx(netdev
);
361 static int gmac_setup_phy(struct net_device
*netdev
)
363 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
364 union gmac_status status
= { .bits32
= 0 };
365 struct device
*dev
= port
->dev
;
366 struct phy_device
*phy
;
368 phy
= of_phy_get_and_connect(netdev
,
373 netdev
->phydev
= phy
;
375 phy_set_max_speed(phy
, SPEED_1000
);
376 phy_support_asym_pause(phy
);
378 /* set PHY interface type */
379 switch (phy
->interface
) {
380 case PHY_INTERFACE_MODE_MII
:
382 "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
383 status
.bits
.mii_rmii
= GMAC_PHY_MII
;
385 case PHY_INTERFACE_MODE_GMII
:
387 "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
388 status
.bits
.mii_rmii
= GMAC_PHY_GMII
;
390 case PHY_INTERFACE_MODE_RGMII
:
392 "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
393 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
396 netdev_err(netdev
, "Unsupported MII interface\n");
398 netdev
->phydev
= NULL
;
401 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
403 if (netif_msg_link(port
))
404 phy_attached_info(phy
);
409 /* The maximum frame length is not logically enumerated in the
410 * hardware, so we do a table lookup to find the applicable max
413 struct gmac_max_framelen
{
414 unsigned int max_l3_len
;
418 static const struct gmac_max_framelen gmac_maxlens
[] = {
421 .val
= CONFIG0_MAXLEN_1518
,
425 .val
= CONFIG0_MAXLEN_1522
,
429 .val
= CONFIG0_MAXLEN_1536
,
433 .val
= CONFIG0_MAXLEN_1542
,
437 .val
= CONFIG0_MAXLEN_9k
,
441 .val
= CONFIG0_MAXLEN_10k
,
445 static int gmac_pick_rx_max_len(unsigned int max_l3_len
)
447 const struct gmac_max_framelen
*maxlen
;
451 maxtot
= max_l3_len
+ ETH_HLEN
+ VLAN_HLEN
;
453 for (i
= 0; i
< ARRAY_SIZE(gmac_maxlens
); i
++) {
454 maxlen
= &gmac_maxlens
[i
];
455 if (maxtot
<= maxlen
->max_l3_len
)
462 static int gmac_init(struct net_device
*netdev
)
464 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
465 union gmac_config0 config0
= { .bits
= {
476 .port0_chk_classq
= 1,
477 .port1_chk_classq
= 1,
479 union gmac_ahb_weight ahb_weight
= { .bits
= {
484 .tq_dv_threshold
= 0,
486 union gmac_tx_wcr0 hw_weigh
= { .bits
= {
492 union gmac_tx_wcr1 sw_weigh
= { .bits
= {
500 union gmac_config1 config1
= { .bits
= {
504 union gmac_config2 config2
= { .bits
= {
508 union gmac_config3 config3
= { .bits
= {
512 union gmac_config0 tmp
;
515 config0
.bits
.max_len
= gmac_pick_rx_max_len(netdev
->mtu
);
516 tmp
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
517 config0
.bits
.reserved
= tmp
.bits
.reserved
;
518 writel(config0
.bits32
, port
->gmac_base
+ GMAC_CONFIG0
);
519 writel(config1
.bits32
, port
->gmac_base
+ GMAC_CONFIG1
);
520 writel(config2
.bits32
, port
->gmac_base
+ GMAC_CONFIG2
);
521 writel(config3
.bits32
, port
->gmac_base
+ GMAC_CONFIG3
);
523 val
= readl(port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
524 writel(ahb_weight
.bits32
, port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
526 writel(hw_weigh
.bits32
,
527 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_0_REG
);
528 writel(sw_weigh
.bits32
,
529 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_1_REG
);
531 port
->rxq_order
= DEFAULT_GMAC_RXQ_ORDER
;
532 port
->txq_order
= DEFAULT_GMAC_TXQ_ORDER
;
533 port
->rx_coalesce_nsecs
= DEFAULT_RX_COALESCE_NSECS
;
535 /* Mark every quarter of the queue a packet for interrupt
536 * in order to be able to wake up the queue if it was stopped
538 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
543 static void gmac_uninit(struct net_device
*netdev
)
546 phy_disconnect(netdev
->phydev
);
549 static int gmac_setup_txqs(struct net_device
*netdev
)
551 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
552 unsigned int n_txq
= netdev
->num_tx_queues
;
553 struct gemini_ethernet
*geth
= port
->geth
;
554 size_t entries
= 1 << port
->txq_order
;
555 struct gmac_txq
*txq
= port
->txq
;
556 struct gmac_txdesc
*desc_ring
;
557 size_t len
= n_txq
* entries
;
558 struct sk_buff
**skb_tab
;
559 void __iomem
*rwptr_reg
;
563 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
565 skb_tab
= kcalloc(len
, sizeof(*skb_tab
), GFP_KERNEL
);
569 desc_ring
= dma_alloc_coherent(geth
->dev
, len
* sizeof(*desc_ring
),
570 &port
->txq_dma_base
, GFP_KERNEL
);
577 if (port
->txq_dma_base
& ~DMA_Q_BASE_MASK
) {
578 dev_warn(geth
->dev
, "TX queue base is not aligned\n");
583 writel(port
->txq_dma_base
| port
->txq_order
,
584 port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
586 for (i
= 0; i
< n_txq
; i
++) {
587 txq
->ring
= desc_ring
;
589 txq
->noirq_packets
= 0;
591 r
= readw(rwptr_reg
);
593 writew(r
, rwptr_reg
);
598 desc_ring
+= entries
;
605 static void gmac_clean_txq(struct net_device
*netdev
, struct gmac_txq
*txq
,
608 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
609 unsigned int m
= (1 << port
->txq_order
) - 1;
610 struct gemini_ethernet
*geth
= port
->geth
;
611 unsigned int c
= txq
->cptr
;
612 union gmac_txdesc_0 word0
;
613 union gmac_txdesc_1 word1
;
614 unsigned int hwchksum
= 0;
615 unsigned long bytes
= 0;
616 struct gmac_txdesc
*txd
;
617 unsigned short nfrags
;
618 unsigned int errs
= 0;
619 unsigned int pkts
= 0;
630 mapping
= txd
->word2
.buf_adr
;
631 word3
= txd
->word3
.bits32
;
633 dma_unmap_single(geth
->dev
, mapping
,
634 word0
.bits
.buffer_size
, DMA_TO_DEVICE
);
637 dev_kfree_skb(txq
->skb
[c
]);
642 if (!(word3
& SOF_BIT
))
645 if (!word0
.bits
.status_tx_ok
) {
651 bytes
+= txd
->word1
.bits
.byte_count
;
653 if (word1
.bits32
& TSS_CHECKUM_ENABLE
)
656 nfrags
= word0
.bits
.desc_count
- 1;
658 if (nfrags
>= TX_MAX_FRAGS
)
659 nfrags
= TX_MAX_FRAGS
- 1;
661 u64_stats_update_begin(&port
->tx_stats_syncp
);
662 port
->tx_frag_stats
[nfrags
]++;
663 u64_stats_update_end(&port
->tx_stats_syncp
);
667 u64_stats_update_begin(&port
->ir_stats_syncp
);
668 port
->stats
.tx_errors
+= errs
;
669 port
->stats
.tx_packets
+= pkts
;
670 port
->stats
.tx_bytes
+= bytes
;
671 port
->tx_hw_csummed
+= hwchksum
;
672 u64_stats_update_end(&port
->ir_stats_syncp
);
677 static void gmac_cleanup_txqs(struct net_device
*netdev
)
679 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
680 unsigned int n_txq
= netdev
->num_tx_queues
;
681 struct gemini_ethernet
*geth
= port
->geth
;
682 void __iomem
*rwptr_reg
;
685 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
687 for (i
= 0; i
< n_txq
; i
++) {
688 r
= readw(rwptr_reg
);
690 writew(r
, rwptr_reg
);
693 gmac_clean_txq(netdev
, port
->txq
+ i
, r
);
695 writel(0, port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
697 kfree(port
->txq
->skb
);
698 dma_free_coherent(geth
->dev
,
699 n_txq
* sizeof(*port
->txq
->ring
) << port
->txq_order
,
700 port
->txq
->ring
, port
->txq_dma_base
);
703 static int gmac_setup_rxq(struct net_device
*netdev
)
705 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
706 struct gemini_ethernet
*geth
= port
->geth
;
707 struct nontoe_qhdr __iomem
*qhdr
;
709 qhdr
= geth
->base
+ TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
710 port
->rxq_rwptr
= &qhdr
->word1
;
712 /* Remap a slew of memory to use for the RX queue */
713 port
->rxq_ring
= dma_alloc_coherent(geth
->dev
,
714 sizeof(*port
->rxq_ring
) << port
->rxq_order
,
715 &port
->rxq_dma_base
, GFP_KERNEL
);
718 if (port
->rxq_dma_base
& ~NONTOE_QHDR0_BASE_MASK
) {
719 dev_warn(geth
->dev
, "RX queue base is not aligned\n");
723 writel(port
->rxq_dma_base
| port
->rxq_order
, &qhdr
->word0
);
724 writel(0, port
->rxq_rwptr
);
728 static struct gmac_queue_page
*
729 gmac_get_queue_page(struct gemini_ethernet
*geth
,
730 struct gemini_ethernet_port
*port
,
733 struct gmac_queue_page
*gpage
;
737 /* Only look for even pages */
738 mapping
= addr
& PAGE_MASK
;
740 if (!geth
->freeq_pages
) {
741 dev_err(geth
->dev
, "try to get page with no page list\n");
745 /* Look up a ring buffer page from virtual mapping */
746 for (i
= 0; i
< geth
->num_freeq_pages
; i
++) {
747 gpage
= &geth
->freeq_pages
[i
];
748 if (gpage
->mapping
== mapping
)
755 static void gmac_cleanup_rxq(struct net_device
*netdev
)
757 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
758 struct gemini_ethernet
*geth
= port
->geth
;
759 struct gmac_rxdesc
*rxd
= port
->rxq_ring
;
760 static struct gmac_queue_page
*gpage
;
761 struct nontoe_qhdr __iomem
*qhdr
;
762 void __iomem
*dma_reg
;
763 void __iomem
*ptr_reg
;
769 TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
770 dma_reg
= &qhdr
->word0
;
771 ptr_reg
= &qhdr
->word1
;
773 rw
.bits32
= readl(ptr_reg
);
776 writew(r
, ptr_reg
+ 2);
780 /* Loop from read pointer to write pointer of the RX queue
781 * and free up all pages by the queue.
784 mapping
= rxd
[r
].word2
.buf_adr
;
786 r
&= ((1 << port
->rxq_order
) - 1);
791 /* Freeq pointers are one page off */
792 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
794 dev_err(geth
->dev
, "could not find page\n");
797 /* Release the RX queue reference to the page */
798 put_page(gpage
->page
);
801 dma_free_coherent(geth
->dev
, sizeof(*port
->rxq_ring
) << port
->rxq_order
,
802 port
->rxq_ring
, port
->rxq_dma_base
);
805 static struct page
*geth_freeq_alloc_map_page(struct gemini_ethernet
*geth
,
808 struct gmac_rxdesc
*freeq_entry
;
809 struct gmac_queue_page
*gpage
;
810 unsigned int fpp_order
;
811 unsigned int frag_len
;
816 /* First allocate and DMA map a single page */
817 page
= alloc_page(GFP_ATOMIC
);
821 mapping
= dma_map_single(geth
->dev
, page_address(page
),
822 PAGE_SIZE
, DMA_FROM_DEVICE
);
823 if (dma_mapping_error(geth
->dev
, mapping
)) {
828 /* The assign the page mapping (physical address) to the buffer address
829 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
830 * 4k), and the default RX frag order is 11 (fragments are up 20 2048
831 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
832 * each page normally needs two entries in the queue.
834 frag_len
= 1 << geth
->freeq_frag_order
; /* Usually 2048 */
835 fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
836 freeq_entry
= geth
->freeq_ring
+ (pn
<< fpp_order
);
837 dev_dbg(geth
->dev
, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
838 pn
, frag_len
, (1 << fpp_order
), freeq_entry
);
839 for (i
= (1 << fpp_order
); i
> 0; i
--) {
840 freeq_entry
->word2
.buf_adr
= mapping
;
845 /* If the freeq entry already has a page mapped, then unmap it. */
846 gpage
= &geth
->freeq_pages
[pn
];
848 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
849 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
850 /* This should be the last reference to the page so it gets
853 put_page(gpage
->page
);
856 /* Then put our new mapping into the page table */
857 dev_dbg(geth
->dev
, "page %d, DMA addr: %08x, page %p\n",
858 pn
, (unsigned int)mapping
, page
);
859 gpage
->mapping
= mapping
;
866 * geth_fill_freeq() - Fill the freeq with empty fragments to use
867 * @geth: the ethernet adapter
868 * @refill: whether to reset the queue by filling in all freeq entries or
869 * just refill it, usually the interrupt to refill the queue happens when
870 * the queue is half empty.
872 static unsigned int geth_fill_freeq(struct gemini_ethernet
*geth
, bool refill
)
874 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
875 unsigned int count
= 0;
876 unsigned int pn
, epn
;
882 m_pn
= (1 << (geth
->freeq_order
- fpp_order
)) - 1;
884 spin_lock_irqsave(&geth
->freeq_lock
, flags
);
886 rw
.bits32
= readl(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
887 pn
= (refill
? rw
.bits
.wptr
: rw
.bits
.rptr
) >> fpp_order
;
888 epn
= (rw
.bits
.rptr
>> fpp_order
) - 1;
891 /* Loop over the freeq ring buffer entries */
893 struct gmac_queue_page
*gpage
;
896 gpage
= &geth
->freeq_pages
[pn
];
899 dev_dbg(geth
->dev
, "fill entry %d page ref count %d add %d refs\n",
900 pn
, page_ref_count(page
), 1 << fpp_order
);
902 if (page_ref_count(page
) > 1) {
903 unsigned int fl
= (pn
- epn
) & m_pn
;
905 if (fl
> 64 >> fpp_order
)
908 page
= geth_freeq_alloc_map_page(geth
, pn
);
913 /* Add one reference per fragment in the page */
914 page_ref_add(page
, 1 << fpp_order
);
915 count
+= 1 << fpp_order
;
920 writew(pn
<< fpp_order
, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
922 spin_unlock_irqrestore(&geth
->freeq_lock
, flags
);
927 static int geth_setup_freeq(struct gemini_ethernet
*geth
)
929 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
930 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
931 unsigned int len
= 1 << geth
->freeq_order
;
932 unsigned int pages
= len
>> fpp_order
;
933 union queue_threshold qt
;
934 union dma_skb_size skbsz
;
938 geth
->freeq_ring
= dma_alloc_coherent(geth
->dev
,
939 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
940 &geth
->freeq_dma_base
, GFP_KERNEL
);
941 if (!geth
->freeq_ring
)
943 if (geth
->freeq_dma_base
& ~DMA_Q_BASE_MASK
) {
944 dev_warn(geth
->dev
, "queue ring base is not aligned\n");
948 /* Allocate a mapping to page look-up index */
949 geth
->freeq_pages
= kcalloc(pages
, sizeof(*geth
->freeq_pages
),
951 if (!geth
->freeq_pages
)
953 geth
->num_freeq_pages
= pages
;
955 dev_info(geth
->dev
, "allocate %d pages for queue\n", pages
);
956 for (pn
= 0; pn
< pages
; pn
++)
957 if (!geth_freeq_alloc_map_page(geth
, pn
))
958 goto err_freeq_alloc
;
960 filled
= geth_fill_freeq(geth
, false);
962 goto err_freeq_alloc
;
964 qt
.bits32
= readl(geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
965 qt
.bits
.swfq_empty
= 32;
966 writel(qt
.bits32
, geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
968 skbsz
.bits
.sw_skb_size
= 1 << geth
->freeq_frag_order
;
969 writel(skbsz
.bits32
, geth
->base
+ GLOBAL_DMA_SKB_SIZE_REG
);
970 writel(geth
->freeq_dma_base
| geth
->freeq_order
,
971 geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
977 struct gmac_queue_page
*gpage
;
981 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
982 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
983 gpage
= &geth
->freeq_pages
[pn
];
984 put_page(gpage
->page
);
987 kfree(geth
->freeq_pages
);
989 dma_free_coherent(geth
->dev
,
990 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
991 geth
->freeq_ring
, geth
->freeq_dma_base
);
992 geth
->freeq_ring
= NULL
;
997 * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
998 * @geth: the Gemini global ethernet state
1000 static void geth_cleanup_freeq(struct gemini_ethernet
*geth
)
1002 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
1003 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
1004 unsigned int len
= 1 << geth
->freeq_order
;
1005 unsigned int pages
= len
>> fpp_order
;
1008 writew(readw(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
),
1009 geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
1010 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
1012 for (pn
= 0; pn
< pages
; pn
++) {
1013 struct gmac_queue_page
*gpage
;
1016 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
1017 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
1019 gpage
= &geth
->freeq_pages
[pn
];
1020 while (page_ref_count(gpage
->page
) > 0)
1021 put_page(gpage
->page
);
1024 kfree(geth
->freeq_pages
);
1026 dma_free_coherent(geth
->dev
,
1027 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
1028 geth
->freeq_ring
, geth
->freeq_dma_base
);
1032 * geth_resize_freeq() - resize the software queue depth
1033 * @port: the port requesting the change
1035 * This gets called at least once during probe() so the device queue gets
1036 * "resized" from the hardware defaults. Since both ports/net devices share
1037 * the same hardware queue, some synchronization between the ports is
1040 static int geth_resize_freeq(struct gemini_ethernet_port
*port
)
1042 struct gemini_ethernet
*geth
= port
->geth
;
1043 struct net_device
*netdev
= port
->netdev
;
1044 struct gemini_ethernet_port
*other_port
;
1045 struct net_device
*other_netdev
;
1046 unsigned int new_size
= 0;
1047 unsigned int new_order
;
1048 unsigned long flags
;
1052 if (netdev
->dev_id
== 0)
1053 other_netdev
= geth
->port1
->netdev
;
1055 other_netdev
= geth
->port0
->netdev
;
1057 if (other_netdev
&& netif_running(other_netdev
))
1060 new_size
= 1 << (port
->rxq_order
+ 1);
1061 netdev_dbg(netdev
, "port %d size: %d order %d\n",
1066 other_port
= netdev_priv(other_netdev
);
1067 new_size
+= 1 << (other_port
->rxq_order
+ 1);
1068 netdev_dbg(other_netdev
, "port %d size: %d order %d\n",
1069 other_netdev
->dev_id
,
1070 (1 << (other_port
->rxq_order
+ 1)),
1071 other_port
->rxq_order
);
1074 new_order
= min(15, ilog2(new_size
- 1) + 1);
1075 dev_dbg(geth
->dev
, "set shared queue to size %d order %d\n",
1076 new_size
, new_order
);
1077 if (geth
->freeq_order
== new_order
)
1080 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1082 /* Disable the software queue IRQs */
1083 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1084 en
&= ~SWFQ_EMPTY_INT_BIT
;
1085 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1086 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1088 /* Drop the old queue */
1089 if (geth
->freeq_ring
)
1090 geth_cleanup_freeq(geth
);
1092 /* Allocate a new queue with the desired order */
1093 geth
->freeq_order
= new_order
;
1094 ret
= geth_setup_freeq(geth
);
1096 /* Restart the interrupts - NOTE if this is the first resize
1097 * after probe(), this is where the interrupts get turned on
1098 * in the first place.
1100 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1101 en
|= SWFQ_EMPTY_INT_BIT
;
1102 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1103 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1108 static void gmac_tx_irq_enable(struct net_device
*netdev
,
1109 unsigned int txq
, int en
)
1111 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1112 struct gemini_ethernet
*geth
= port
->geth
;
1115 netdev_dbg(netdev
, "%s device %d\n", __func__
, netdev
->dev_id
);
1117 mask
= GMAC0_IRQ0_TXQ0_INTS
<< (6 * netdev
->dev_id
+ txq
);
1120 writel(mask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1122 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1123 val
= en
? val
| mask
: val
& ~mask
;
1124 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
/* TX completion interrupt handler for one queue: mask further TX interrupts
 * for that queue and wake its netdev TX queue so the stack resumes xmit.
 */
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
{
	struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);

	gmac_tx_irq_enable(netdev, txq_num, 0);
	netif_tx_wake_queue(ntxq);
}
1135 static int gmac_map_tx_bufs(struct net_device
*netdev
, struct sk_buff
*skb
,
1136 struct gmac_txq
*txq
, unsigned short *desc
)
1138 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1139 struct skb_shared_info
*skb_si
= skb_shinfo(skb
);
1140 unsigned short m
= (1 << port
->txq_order
) - 1;
1141 short frag
, last_frag
= skb_si
->nr_frags
- 1;
1142 struct gemini_ethernet
*geth
= port
->geth
;
1143 unsigned int word1
, word3
, buflen
;
1144 unsigned short w
= *desc
;
1145 struct gmac_txdesc
*txd
;
1146 skb_frag_t
*skb_frag
;
1153 if (skb
->protocol
== htons(ETH_P_8021Q
))
1160 word1
|= TSS_MTU_ENABLE_BIT
;
1164 if (skb
->ip_summed
!= CHECKSUM_NONE
) {
1167 if (skb
->protocol
== htons(ETH_P_IP
)) {
1168 word1
|= TSS_IP_CHKSUM_BIT
;
1169 tcp
= ip_hdr(skb
)->protocol
== IPPROTO_TCP
;
1171 word1
|= TSS_IPV6_ENABLE_BIT
;
1172 tcp
= ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
;
1175 word1
|= tcp
? TSS_TCP_CHKSUM_BIT
: TSS_UDP_CHKSUM_BIT
;
1179 while (frag
<= last_frag
) {
1182 buflen
= skb_headlen(skb
);
1184 skb_frag
= skb_si
->frags
+ frag
;
1185 buffer
= page_address(skb_frag_page(skb_frag
)) +
1186 skb_frag
->page_offset
;
1187 buflen
= skb_frag
->size
;
1190 if (frag
== last_frag
) {
1195 mapping
= dma_map_single(geth
->dev
, buffer
, buflen
,
1197 if (dma_mapping_error(geth
->dev
, mapping
))
1200 txd
= txq
->ring
+ w
;
1201 txd
->word0
.bits32
= buflen
;
1202 txd
->word1
.bits32
= word1
;
1203 txd
->word2
.buf_adr
= mapping
;
1204 txd
->word3
.bits32
= word3
;
1206 word3
&= MTU_SIZE_BIT_MASK
;
1216 while (w
!= *desc
) {
1220 dma_unmap_page(geth
->dev
, txq
->ring
[w
].word2
.buf_adr
,
1221 txq
->ring
[w
].word0
.bits
.buffer_size
,
1227 static int gmac_start_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
1229 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1230 unsigned short m
= (1 << port
->txq_order
) - 1;
1231 struct netdev_queue
*ntxq
;
1232 unsigned short r
, w
, d
;
1233 void __iomem
*ptr_reg
;
1234 struct gmac_txq
*txq
;
1235 int txq_num
, nfrags
;
1238 SKB_FRAG_ASSERT(skb
);
1240 if (skb
->len
>= 0x10000)
1243 txq_num
= skb_get_queue_mapping(skb
);
1244 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE_PTR_REG(txq_num
);
1245 txq
= &port
->txq
[txq_num
];
1246 ntxq
= netdev_get_tx_queue(netdev
, txq_num
);
1247 nfrags
= skb_shinfo(skb
)->nr_frags
;
1249 rw
.bits32
= readl(ptr_reg
);
1253 d
= txq
->cptr
- w
- 1;
1256 if (d
< nfrags
+ 2) {
1257 gmac_clean_txq(netdev
, txq
, r
);
1258 d
= txq
->cptr
- w
- 1;
1261 if (d
< nfrags
+ 2) {
1262 netif_tx_stop_queue(ntxq
);
1264 d
= txq
->cptr
+ nfrags
+ 16;
1266 txq
->ring
[d
].word3
.bits
.eofie
= 1;
1267 gmac_tx_irq_enable(netdev
, txq_num
, 1);
1269 u64_stats_update_begin(&port
->tx_stats_syncp
);
1270 netdev
->stats
.tx_fifo_errors
++;
1271 u64_stats_update_end(&port
->tx_stats_syncp
);
1272 return NETDEV_TX_BUSY
;
1276 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
)) {
1277 if (skb_linearize(skb
))
1280 u64_stats_update_begin(&port
->tx_stats_syncp
);
1281 port
->tx_frags_linearized
++;
1282 u64_stats_update_end(&port
->tx_stats_syncp
);
1284 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
))
1288 writew(w
, ptr_reg
+ 2);
1290 gmac_clean_txq(netdev
, txq
, r
);
1291 return NETDEV_TX_OK
;
1296 u64_stats_update_begin(&port
->tx_stats_syncp
);
1297 port
->stats
.tx_dropped
++;
1298 u64_stats_update_end(&port
->tx_stats_syncp
);
1299 return NETDEV_TX_OK
;
/* net_device_ops .ndo_tx_timeout hook: log the event and dump the DMA
 * engine state for diagnosis.  No recovery is attempted here.
 */
static void gmac_tx_timeout(struct net_device *netdev)
{
	netdev_err(netdev, "Tx timeout\n");
	gmac_dump_dma_state(netdev);
}
1308 static void gmac_enable_irq(struct net_device
*netdev
, int enable
)
1310 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1311 struct gemini_ethernet
*geth
= port
->geth
;
1312 unsigned long flags
;
1315 netdev_dbg(netdev
, "%s device %d %s\n", __func__
,
1316 netdev
->dev_id
, enable
? "enable" : "disable");
1317 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1319 mask
= GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2);
1320 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1321 val
= enable
? (val
| mask
) : (val
& ~mask
);
1322 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1324 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1325 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1326 val
= enable
? (val
| mask
) : (val
& ~mask
);
1327 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1329 mask
= GMAC0_IRQ4_8
<< (netdev
->dev_id
* 8);
1330 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1331 val
= enable
? (val
| mask
) : (val
& ~mask
);
1332 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1334 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1337 static void gmac_enable_rx_irq(struct net_device
*netdev
, int enable
)
1339 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1340 struct gemini_ethernet
*geth
= port
->geth
;
1341 unsigned long flags
;
1344 netdev_dbg(netdev
, "%s device %d %s\n", __func__
, netdev
->dev_id
,
1345 enable
? "enable" : "disable");
1346 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1347 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1349 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1350 val
= enable
? (val
| mask
) : (val
& ~mask
);
1351 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1353 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1356 static struct sk_buff
*gmac_skb_if_good_frame(struct gemini_ethernet_port
*port
,
1357 union gmac_rxdesc_0 word0
,
1358 unsigned int frame_len
)
1360 unsigned int rx_csum
= word0
.bits
.chksum_status
;
1361 unsigned int rx_status
= word0
.bits
.status
;
1362 struct sk_buff
*skb
= NULL
;
1364 port
->rx_stats
[rx_status
]++;
1365 port
->rx_csum_stats
[rx_csum
]++;
1367 if (word0
.bits
.derr
|| word0
.bits
.perr
||
1368 rx_status
|| frame_len
< ETH_ZLEN
||
1369 rx_csum
>= RX_CHKSUM_IP_ERR_UNKNOWN
) {
1370 port
->stats
.rx_errors
++;
1372 if (frame_len
< ETH_ZLEN
|| RX_ERROR_LENGTH(rx_status
))
1373 port
->stats
.rx_length_errors
++;
1374 if (RX_ERROR_OVER(rx_status
))
1375 port
->stats
.rx_over_errors
++;
1376 if (RX_ERROR_CRC(rx_status
))
1377 port
->stats
.rx_crc_errors
++;
1378 if (RX_ERROR_FRAME(rx_status
))
1379 port
->stats
.rx_frame_errors
++;
1383 skb
= napi_get_frags(&port
->napi
);
1387 if (rx_csum
== RX_CHKSUM_IP_UDP_TCP_OK
)
1388 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1391 port
->stats
.rx_bytes
+= frame_len
;
1392 port
->stats
.rx_packets
++;
1396 static unsigned int gmac_rx(struct net_device
*netdev
, unsigned int budget
)
1398 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1399 unsigned short m
= (1 << port
->rxq_order
) - 1;
1400 struct gemini_ethernet
*geth
= port
->geth
;
1401 void __iomem
*ptr_reg
= port
->rxq_rwptr
;
1402 unsigned int frame_len
, frag_len
;
1403 struct gmac_rxdesc
*rx
= NULL
;
1404 struct gmac_queue_page
*gpage
;
1405 static struct sk_buff
*skb
;
1406 union gmac_rxdesc_0 word0
;
1407 union gmac_rxdesc_1 word1
;
1408 union gmac_rxdesc_3 word3
;
1409 struct page
*page
= NULL
;
1410 unsigned int page_offs
;
1411 unsigned short r
, w
;
1416 rw
.bits32
= readl(ptr_reg
);
1417 /* Reset interrupt as all packages until here are taken into account */
1418 writel(DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
,
1419 geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1423 while (budget
&& w
!= r
) {
1424 rx
= port
->rxq_ring
+ r
;
1427 mapping
= rx
->word2
.buf_adr
;
1433 frag_len
= word0
.bits
.buffer_size
;
1434 frame_len
= word1
.bits
.byte_count
;
1435 page_offs
= mapping
& ~PAGE_MASK
;
1439 "rxq[%u]: HW BUG: zero DMA desc\n", r
);
1443 /* Freeq pointers are one page off */
1444 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
1446 dev_err(geth
->dev
, "could not find mapping\n");
1451 if (word3
.bits32
& SOF_BIT
) {
1453 napi_free_frags(&port
->napi
);
1454 port
->stats
.rx_dropped
++;
1457 skb
= gmac_skb_if_good_frame(port
, word0
, frame_len
);
1461 page_offs
+= NET_IP_ALIGN
;
1462 frag_len
-= NET_IP_ALIGN
;
1470 if (word3
.bits32
& EOF_BIT
)
1471 frag_len
= frame_len
- skb
->len
;
1473 /* append page frag to skb */
1474 if (frag_nr
== MAX_SKB_FRAGS
)
1478 netdev_err(netdev
, "Received fragment with len = 0\n");
1480 skb_fill_page_desc(skb
, frag_nr
, page
, page_offs
, frag_len
);
1481 skb
->len
+= frag_len
;
1482 skb
->data_len
+= frag_len
;
1483 skb
->truesize
+= frag_len
;
1486 if (word3
.bits32
& EOF_BIT
) {
1487 napi_gro_frags(&port
->napi
);
1495 napi_free_frags(&port
->napi
);
1502 port
->stats
.rx_dropped
++;
1509 static int gmac_napi_poll(struct napi_struct
*napi
, int budget
)
1511 struct gemini_ethernet_port
*port
= netdev_priv(napi
->dev
);
1512 struct gemini_ethernet
*geth
= port
->geth
;
1513 unsigned int freeq_threshold
;
1514 unsigned int received
;
1516 freeq_threshold
= 1 << (geth
->freeq_order
- 1);
1517 u64_stats_update_begin(&port
->rx_stats_syncp
);
1519 received
= gmac_rx(napi
->dev
, budget
);
1520 if (received
< budget
) {
1521 napi_gro_flush(napi
, false);
1522 napi_complete_done(napi
, received
);
1523 gmac_enable_rx_irq(napi
->dev
, 1);
1524 ++port
->rx_napi_exits
;
1527 port
->freeq_refill
+= (budget
- received
);
1528 if (port
->freeq_refill
> freeq_threshold
) {
1529 port
->freeq_refill
-= freeq_threshold
;
1530 geth_fill_freeq(geth
, true);
1533 u64_stats_update_end(&port
->rx_stats_syncp
);
1537 static void gmac_dump_dma_state(struct net_device
*netdev
)
1539 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1540 struct gemini_ethernet
*geth
= port
->geth
;
1541 void __iomem
*ptr_reg
;
1544 /* Interrupt status */
1545 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1546 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1547 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
1548 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
1549 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1550 netdev_err(netdev
, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1551 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1553 /* Interrupt enable */
1554 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1555 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1556 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
1557 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
1558 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1559 netdev_err(netdev
, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1560 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1563 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_FIRST_DESC_REG
);
1564 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_CURR_DESC_REG
);
1565 reg
[2] = GET_RPTR(port
->rxq_rwptr
);
1566 reg
[3] = GET_WPTR(port
->rxq_rwptr
);
1567 netdev_err(netdev
, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1568 reg
[0], reg
[1], reg
[2], reg
[3]);
1570 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD0_REG
);
1571 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD1_REG
);
1572 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD2_REG
);
1573 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD3_REG
);
1574 netdev_err(netdev
, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1575 reg
[0], reg
[1], reg
[2], reg
[3]);
1578 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
1580 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_FIRST_DESC_REG
);
1581 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_CURR_DESC_REG
);
1582 reg
[2] = GET_RPTR(ptr_reg
);
1583 reg
[3] = GET_WPTR(ptr_reg
);
1584 netdev_err(netdev
, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1585 reg
[0], reg
[1], reg
[2], reg
[3]);
1587 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD0_REG
);
1588 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD1_REG
);
1589 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD2_REG
);
1590 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD3_REG
);
1591 netdev_err(netdev
, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1592 reg
[0], reg
[1], reg
[2], reg
[3]);
1594 /* FREE queues status */
1595 ptr_reg
= geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
;
1597 reg
[0] = GET_RPTR(ptr_reg
);
1598 reg
[1] = GET_WPTR(ptr_reg
);
1600 ptr_reg
= geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
;
1602 reg
[2] = GET_RPTR(ptr_reg
);
1603 reg
[3] = GET_WPTR(ptr_reg
);
1604 netdev_err(netdev
, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
1605 reg
[0], reg
[1], reg
[2], reg
[3]);
1608 static void gmac_update_hw_stats(struct net_device
*netdev
)
1610 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1611 unsigned int rx_discards
, rx_mcast
, rx_bcast
;
1612 struct gemini_ethernet
*geth
= port
->geth
;
1613 unsigned long flags
;
1615 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1616 u64_stats_update_begin(&port
->ir_stats_syncp
);
1618 rx_discards
= readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1619 port
->hw_stats
[0] += rx_discards
;
1620 port
->hw_stats
[1] += readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1621 rx_mcast
= readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1622 port
->hw_stats
[2] += rx_mcast
;
1623 rx_bcast
= readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1624 port
->hw_stats
[3] += rx_bcast
;
1625 port
->hw_stats
[4] += readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1626 port
->hw_stats
[5] += readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1628 port
->stats
.rx_missed_errors
+= rx_discards
;
1629 port
->stats
.multicast
+= rx_mcast
;
1630 port
->stats
.multicast
+= rx_bcast
;
1632 writel(GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8),
1633 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1635 u64_stats_update_end(&port
->ir_stats_syncp
);
1636 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1640 * gmac_get_intr_flags() - get interrupt status flags for a port from
1641 * @netdev: the net device for the port to get flags from
1642 * @i: the interrupt status register 0..4
1644 static u32
gmac_get_intr_flags(struct net_device
*netdev
, int i
)
1646 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1647 struct gemini_ethernet
*geth
= port
->geth
;
1648 void __iomem
*irqif_reg
, *irqen_reg
;
1649 unsigned int offs
, val
;
1651 /* Calculate the offset using the stride of the status registers */
1652 offs
= i
* (GLOBAL_INTERRUPT_STATUS_1_REG
-
1653 GLOBAL_INTERRUPT_STATUS_0_REG
);
1655 irqif_reg
= geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
+ offs
;
1656 irqen_reg
= geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
+ offs
;
1658 val
= readl(irqif_reg
) & readl(irqen_reg
);
1662 static enum hrtimer_restart
gmac_coalesce_delay_expired(struct hrtimer
*timer
)
1664 struct gemini_ethernet_port
*port
=
1665 container_of(timer
, struct gemini_ethernet_port
,
1668 napi_schedule(&port
->napi
);
1669 return HRTIMER_NORESTART
;
1672 static irqreturn_t
gmac_irq(int irq
, void *data
)
1674 struct gemini_ethernet_port
*port
;
1675 struct net_device
*netdev
= data
;
1676 struct gemini_ethernet
*geth
;
1679 port
= netdev_priv(netdev
);
1682 val
= gmac_get_intr_flags(netdev
, 0);
1685 if (val
& (GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2))) {
1687 netdev_err(netdev
, "hw failure/sw bug\n");
1688 gmac_dump_dma_state(netdev
);
1690 /* don't know how to recover, just reduce losses */
1691 gmac_enable_irq(netdev
, 0);
1695 if (val
& (GMAC0_IRQ0_TXQ0_INTS
<< (netdev
->dev_id
* 6)))
1696 gmac_tx_irq(netdev
, 0);
1698 val
= gmac_get_intr_flags(netdev
, 1);
1701 if (val
& (DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
)) {
1702 gmac_enable_rx_irq(netdev
, 0);
1704 if (!port
->rx_coalesce_nsecs
) {
1705 napi_schedule(&port
->napi
);
1709 ktime
= ktime_set(0, port
->rx_coalesce_nsecs
);
1710 hrtimer_start(&port
->rx_coalesce_timer
, ktime
,
1715 val
= gmac_get_intr_flags(netdev
, 4);
1718 if (val
& (GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8)))
1719 gmac_update_hw_stats(netdev
);
1721 if (val
& (GMAC0_RX_OVERRUN_INT_BIT
<< (netdev
->dev_id
* 8))) {
1722 writel(GMAC0_RXDERR_INT_BIT
<< (netdev
->dev_id
* 8),
1723 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1725 spin_lock(&geth
->irq_lock
);
1726 u64_stats_update_begin(&port
->ir_stats_syncp
);
1727 ++port
->stats
.rx_fifo_errors
;
1728 u64_stats_update_end(&port
->ir_stats_syncp
);
1729 spin_unlock(&geth
->irq_lock
);
1732 return orr
? IRQ_HANDLED
: IRQ_NONE
;
1735 static void gmac_start_dma(struct gemini_ethernet_port
*port
)
1737 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1738 union gmac_dma_ctrl dma_ctrl
;
1740 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1741 dma_ctrl
.bits
.rd_enable
= 1;
1742 dma_ctrl
.bits
.td_enable
= 1;
1743 dma_ctrl
.bits
.loopback
= 0;
1744 dma_ctrl
.bits
.drop_small_ack
= 0;
1745 dma_ctrl
.bits
.rd_insert_bytes
= NET_IP_ALIGN
;
1746 dma_ctrl
.bits
.rd_prot
= HPROT_DATA_CACHE
| HPROT_PRIVILIGED
;
1747 dma_ctrl
.bits
.rd_burst_size
= HBURST_INCR8
;
1748 dma_ctrl
.bits
.rd_bus
= HSIZE_8
;
1749 dma_ctrl
.bits
.td_prot
= HPROT_DATA_CACHE
;
1750 dma_ctrl
.bits
.td_burst_size
= HBURST_INCR8
;
1751 dma_ctrl
.bits
.td_bus
= HSIZE_8
;
1753 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1756 static void gmac_stop_dma(struct gemini_ethernet_port
*port
)
1758 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1759 union gmac_dma_ctrl dma_ctrl
;
1761 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1762 dma_ctrl
.bits
.rd_enable
= 0;
1763 dma_ctrl
.bits
.td_enable
= 0;
1764 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1767 static int gmac_open(struct net_device
*netdev
)
1769 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1772 if (!netdev
->phydev
) {
1773 err
= gmac_setup_phy(netdev
);
1775 netif_err(port
, ifup
, netdev
,
1776 "PHY init failed: %d\n", err
);
1781 err
= request_irq(netdev
->irq
, gmac_irq
,
1782 IRQF_SHARED
, netdev
->name
, netdev
);
1784 netdev_err(netdev
, "no IRQ\n");
1788 netif_carrier_off(netdev
);
1789 phy_start(netdev
->phydev
);
1791 err
= geth_resize_freeq(port
);
1792 /* It's fine if it's just busy, the other port has set up
1793 * the freeq in that case.
1795 if (err
&& (err
!= -EBUSY
)) {
1796 netdev_err(netdev
, "could not resize freeq\n");
1800 err
= gmac_setup_rxq(netdev
);
1802 netdev_err(netdev
, "could not setup RXQ\n");
1806 err
= gmac_setup_txqs(netdev
);
1808 netdev_err(netdev
, "could not setup TXQs\n");
1809 gmac_cleanup_rxq(netdev
);
1813 napi_enable(&port
->napi
);
1815 gmac_start_dma(port
);
1816 gmac_enable_irq(netdev
, 1);
1817 gmac_enable_tx_rx(netdev
);
1818 netif_tx_start_all_queues(netdev
);
1820 hrtimer_init(&port
->rx_coalesce_timer
, CLOCK_MONOTONIC
,
1822 port
->rx_coalesce_timer
.function
= &gmac_coalesce_delay_expired
;
1824 netdev_dbg(netdev
, "opened\n");
1829 phy_stop(netdev
->phydev
);
1830 free_irq(netdev
->irq
, netdev
);
1834 static int gmac_stop(struct net_device
*netdev
)
1836 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1838 hrtimer_cancel(&port
->rx_coalesce_timer
);
1839 netif_tx_stop_all_queues(netdev
);
1840 gmac_disable_tx_rx(netdev
);
1841 gmac_stop_dma(port
);
1842 napi_disable(&port
->napi
);
1844 gmac_enable_irq(netdev
, 0);
1845 gmac_cleanup_rxq(netdev
);
1846 gmac_cleanup_txqs(netdev
);
1848 phy_stop(netdev
->phydev
);
1849 free_irq(netdev
->irq
, netdev
);
1851 gmac_update_hw_stats(netdev
);
1855 static void gmac_set_rx_mode(struct net_device
*netdev
)
1857 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1858 union gmac_rx_fltr filter
= { .bits
= {
1863 struct netdev_hw_addr
*ha
;
1864 unsigned int bit_nr
;
1870 if (netdev
->flags
& IFF_PROMISC
) {
1871 filter
.bits
.error
= 1;
1872 filter
.bits
.promiscuous
= 1;
1875 } else if (netdev
->flags
& IFF_ALLMULTI
) {
1879 netdev_for_each_mc_addr(ha
, netdev
) {
1880 bit_nr
= ~crc32_le(~0, ha
->addr
, ETH_ALEN
) & 0x3f;
1881 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 0x1f);
1885 writel(mc_filter
[0], port
->gmac_base
+ GMAC_MCAST_FIL0
);
1886 writel(mc_filter
[1], port
->gmac_base
+ GMAC_MCAST_FIL1
);
1887 writel(filter
.bits32
, port
->gmac_base
+ GMAC_RX_FLTR
);
1890 static void gmac_write_mac_address(struct net_device
*netdev
)
1892 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1895 memset(addr
, 0, sizeof(addr
));
1896 memcpy(addr
, netdev
->dev_addr
, ETH_ALEN
);
1898 writel(le32_to_cpu(addr
[0]), port
->gmac_base
+ GMAC_STA_ADD0
);
1899 writel(le32_to_cpu(addr
[1]), port
->gmac_base
+ GMAC_STA_ADD1
);
1900 writel(le32_to_cpu(addr
[2]), port
->gmac_base
+ GMAC_STA_ADD2
);
1903 static int gmac_set_mac_address(struct net_device
*netdev
, void *addr
)
1905 struct sockaddr
*sa
= addr
;
1907 memcpy(netdev
->dev_addr
, sa
->sa_data
, ETH_ALEN
);
1908 gmac_write_mac_address(netdev
);
1913 static void gmac_clear_hw_stats(struct net_device
*netdev
)
1915 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1917 readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1918 readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1919 readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1920 readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1921 readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1922 readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1925 static void gmac_get_stats64(struct net_device
*netdev
,
1926 struct rtnl_link_stats64
*stats
)
1928 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1931 gmac_update_hw_stats(netdev
);
1933 /* Racing with RX NAPI */
1935 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
1937 stats
->rx_packets
= port
->stats
.rx_packets
;
1938 stats
->rx_bytes
= port
->stats
.rx_bytes
;
1939 stats
->rx_errors
= port
->stats
.rx_errors
;
1940 stats
->rx_dropped
= port
->stats
.rx_dropped
;
1942 stats
->rx_length_errors
= port
->stats
.rx_length_errors
;
1943 stats
->rx_over_errors
= port
->stats
.rx_over_errors
;
1944 stats
->rx_crc_errors
= port
->stats
.rx_crc_errors
;
1945 stats
->rx_frame_errors
= port
->stats
.rx_frame_errors
;
1947 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
1949 /* Racing with MIB and TX completion interrupts */
1951 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
1953 stats
->tx_errors
= port
->stats
.tx_errors
;
1954 stats
->tx_packets
= port
->stats
.tx_packets
;
1955 stats
->tx_bytes
= port
->stats
.tx_bytes
;
1957 stats
->multicast
= port
->stats
.multicast
;
1958 stats
->rx_missed_errors
= port
->stats
.rx_missed_errors
;
1959 stats
->rx_fifo_errors
= port
->stats
.rx_fifo_errors
;
1961 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
1963 /* Racing with hard_start_xmit */
1965 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
1967 stats
->tx_dropped
= port
->stats
.tx_dropped
;
1969 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
1971 stats
->rx_dropped
+= stats
->rx_missed_errors
;
1974 static int gmac_change_mtu(struct net_device
*netdev
, int new_mtu
)
1976 int max_len
= gmac_pick_rx_max_len(new_mtu
);
1981 gmac_disable_tx_rx(netdev
);
1983 netdev
->mtu
= new_mtu
;
1984 gmac_update_config0_reg(netdev
, max_len
<< CONFIG0_MAXLEN_SHIFT
,
1985 CONFIG0_MAXLEN_MASK
);
1987 netdev_update_features(netdev
);
1989 gmac_enable_tx_rx(netdev
);
1994 static netdev_features_t
gmac_fix_features(struct net_device
*netdev
,
1995 netdev_features_t features
)
1997 if (netdev
->mtu
+ ETH_HLEN
+ VLAN_HLEN
> MTU_SIZE_BIT_MASK
)
1998 features
&= ~GMAC_OFFLOAD_FEATURES
;
2003 static int gmac_set_features(struct net_device
*netdev
,
2004 netdev_features_t features
)
2006 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2007 int enable
= features
& NETIF_F_RXCSUM
;
2008 unsigned long flags
;
2011 spin_lock_irqsave(&port
->config_lock
, flags
);
2013 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2014 reg
= enable
? reg
| CONFIG0_RX_CHKSUM
: reg
& ~CONFIG0_RX_CHKSUM
;
2015 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
2017 spin_unlock_irqrestore(&port
->config_lock
, flags
);
2021 static int gmac_get_sset_count(struct net_device
*netdev
, int sset
)
2023 return sset
== ETH_SS_STATS
? GMAC_STATS_NUM
: 0;
2026 static void gmac_get_strings(struct net_device
*netdev
, u32 stringset
, u8
*data
)
2028 if (stringset
!= ETH_SS_STATS
)
2031 memcpy(data
, gmac_stats_strings
, sizeof(gmac_stats_strings
));
2034 static void gmac_get_ethtool_stats(struct net_device
*netdev
,
2035 struct ethtool_stats
*estats
, u64
*values
)
2037 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2042 gmac_update_hw_stats(netdev
);
2044 /* Racing with MIB interrupt */
2047 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
2049 for (i
= 0; i
< RX_STATS_NUM
; i
++)
2050 *p
++ = port
->hw_stats
[i
];
2052 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
2055 /* Racing with RX NAPI */
2058 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
2060 for (i
= 0; i
< RX_STATUS_NUM
; i
++)
2061 *p
++ = port
->rx_stats
[i
];
2062 for (i
= 0; i
< RX_CHKSUM_NUM
; i
++)
2063 *p
++ = port
->rx_csum_stats
[i
];
2064 *p
++ = port
->rx_napi_exits
;
2066 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
2069 /* Racing with TX start_xmit */
2072 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
2074 for (i
= 0; i
< TX_MAX_FRAGS
; i
++) {
2075 *values
++ = port
->tx_frag_stats
[i
];
2076 port
->tx_frag_stats
[i
] = 0;
2078 *values
++ = port
->tx_frags_linearized
;
2079 *values
++ = port
->tx_hw_csummed
;
2081 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
2084 static int gmac_get_ksettings(struct net_device
*netdev
,
2085 struct ethtool_link_ksettings
*cmd
)
2087 if (!netdev
->phydev
)
2089 phy_ethtool_ksettings_get(netdev
->phydev
, cmd
);
2094 static int gmac_set_ksettings(struct net_device
*netdev
,
2095 const struct ethtool_link_ksettings
*cmd
)
2097 if (!netdev
->phydev
)
2099 return phy_ethtool_ksettings_set(netdev
->phydev
, cmd
);
2102 static int gmac_nway_reset(struct net_device
*netdev
)
2104 if (!netdev
->phydev
)
2106 return phy_start_aneg(netdev
->phydev
);
2109 static void gmac_get_pauseparam(struct net_device
*netdev
,
2110 struct ethtool_pauseparam
*pparam
)
2112 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2113 union gmac_config0 config0
;
2115 config0
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2117 pparam
->rx_pause
= config0
.bits
.rx_fc_en
;
2118 pparam
->tx_pause
= config0
.bits
.tx_fc_en
;
2119 pparam
->autoneg
= true;
2122 static void gmac_get_ringparam(struct net_device
*netdev
,
2123 struct ethtool_ringparam
*rp
)
2125 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2126 union gmac_config0 config0
;
2128 config0
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2130 rp
->rx_max_pending
= 1 << 15;
2131 rp
->rx_mini_max_pending
= 0;
2132 rp
->rx_jumbo_max_pending
= 0;
2133 rp
->tx_max_pending
= 1 << 15;
2135 rp
->rx_pending
= 1 << port
->rxq_order
;
2136 rp
->rx_mini_pending
= 0;
2137 rp
->rx_jumbo_pending
= 0;
2138 rp
->tx_pending
= 1 << port
->txq_order
;
2141 static int gmac_set_ringparam(struct net_device
*netdev
,
2142 struct ethtool_ringparam
*rp
)
2144 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2147 if (netif_running(netdev
))
2150 if (rp
->rx_pending
) {
2151 port
->rxq_order
= min(15, ilog2(rp
->rx_pending
- 1) + 1);
2152 err
= geth_resize_freeq(port
);
2154 if (rp
->tx_pending
) {
2155 port
->txq_order
= min(15, ilog2(rp
->tx_pending
- 1) + 1);
2156 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
2162 static int gmac_get_coalesce(struct net_device
*netdev
,
2163 struct ethtool_coalesce
*ecmd
)
2165 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2167 ecmd
->rx_max_coalesced_frames
= 1;
2168 ecmd
->tx_max_coalesced_frames
= port
->irq_every_tx_packets
;
2169 ecmd
->rx_coalesce_usecs
= port
->rx_coalesce_nsecs
/ 1000;
2174 static int gmac_set_coalesce(struct net_device
*netdev
,
2175 struct ethtool_coalesce
*ecmd
)
2177 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2179 if (ecmd
->tx_max_coalesced_frames
< 1)
2181 if (ecmd
->tx_max_coalesced_frames
>= 1 << port
->txq_order
)
2184 port
->irq_every_tx_packets
= ecmd
->tx_max_coalesced_frames
;
2185 port
->rx_coalesce_nsecs
= ecmd
->rx_coalesce_usecs
* 1000;
2190 static u32
gmac_get_msglevel(struct net_device
*netdev
)
2192 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2194 return port
->msg_enable
;
2197 static void gmac_set_msglevel(struct net_device
*netdev
, u32 level
)
2199 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2201 port
->msg_enable
= level
;
2204 static void gmac_get_drvinfo(struct net_device
*netdev
,
2205 struct ethtool_drvinfo
*info
)
2207 strcpy(info
->driver
, DRV_NAME
);
2208 strcpy(info
->version
, DRV_VERSION
);
2209 strcpy(info
->bus_info
, netdev
->dev_id
? "1" : "0");
2212 static const struct net_device_ops gmac_351x_ops
= {
2213 .ndo_init
= gmac_init
,
2214 .ndo_uninit
= gmac_uninit
,
2215 .ndo_open
= gmac_open
,
2216 .ndo_stop
= gmac_stop
,
2217 .ndo_start_xmit
= gmac_start_xmit
,
2218 .ndo_tx_timeout
= gmac_tx_timeout
,
2219 .ndo_set_rx_mode
= gmac_set_rx_mode
,
2220 .ndo_set_mac_address
= gmac_set_mac_address
,
2221 .ndo_get_stats64
= gmac_get_stats64
,
2222 .ndo_change_mtu
= gmac_change_mtu
,
2223 .ndo_fix_features
= gmac_fix_features
,
2224 .ndo_set_features
= gmac_set_features
,
2227 static const struct ethtool_ops gmac_351x_ethtool_ops
= {
2228 .get_sset_count
= gmac_get_sset_count
,
2229 .get_strings
= gmac_get_strings
,
2230 .get_ethtool_stats
= gmac_get_ethtool_stats
,
2231 .get_link
= ethtool_op_get_link
,
2232 .get_link_ksettings
= gmac_get_ksettings
,
2233 .set_link_ksettings
= gmac_set_ksettings
,
2234 .nway_reset
= gmac_nway_reset
,
2235 .get_pauseparam
= gmac_get_pauseparam
,
2236 .get_ringparam
= gmac_get_ringparam
,
2237 .set_ringparam
= gmac_set_ringparam
,
2238 .get_coalesce
= gmac_get_coalesce
,
2239 .set_coalesce
= gmac_set_coalesce
,
2240 .get_msglevel
= gmac_get_msglevel
,
2241 .set_msglevel
= gmac_set_msglevel
,
2242 .get_drvinfo
= gmac_get_drvinfo
,
2245 static irqreturn_t
gemini_port_irq_thread(int irq
, void *data
)
2247 unsigned long irqmask
= SWFQ_EMPTY_INT_BIT
;
2248 struct gemini_ethernet_port
*port
= data
;
2249 struct gemini_ethernet
*geth
;
2250 unsigned long flags
;
2253 /* The queue is half empty so refill it */
2254 geth_fill_freeq(geth
, true);
2256 spin_lock_irqsave(&geth
->irq_lock
, flags
);
2257 /* ACK queue interrupt */
2258 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2259 /* Enable queue interrupt again */
2260 irqmask
|= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2261 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2262 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
2267 static irqreturn_t
gemini_port_irq(int irq
, void *data
)
2269 struct gemini_ethernet_port
*port
= data
;
2270 struct gemini_ethernet
*geth
;
2271 irqreturn_t ret
= IRQ_NONE
;
2275 spin_lock(&geth
->irq_lock
);
2277 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2278 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2280 if (val
& en
& SWFQ_EMPTY_INT_BIT
) {
2281 /* Disable the queue empty interrupt while we work on
2282 * processing the queue. Also disable overrun interrupts
2283 * as there is not much we can do about it here.
2285 en
&= ~(SWFQ_EMPTY_INT_BIT
| GMAC0_RX_OVERRUN_INT_BIT
2286 | GMAC1_RX_OVERRUN_INT_BIT
);
2287 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2288 ret
= IRQ_WAKE_THREAD
;
2291 spin_unlock(&geth
->irq_lock
);
2296 static void gemini_port_remove(struct gemini_ethernet_port
*port
)
2299 unregister_netdev(port
->netdev
);
2300 clk_disable_unprepare(port
->pclk
);
2301 geth_cleanup_freeq(port
->geth
);
2304 static void gemini_ethernet_init(struct gemini_ethernet
*geth
)
2306 /* Only do this once both ports are online */
2307 if (geth
->initialized
)
2309 if (geth
->port0
&& geth
->port1
)
2310 geth
->initialized
= true;
2314 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
2315 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
2316 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
2317 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
2318 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2320 /* Interrupt config:
2322 * GMAC0 intr bits ------> int0 ----> eth0
2323 * GMAC1 intr bits ------> int1 ----> eth1
2324 * TOE intr -------------> int1 ----> eth1
2325 * Classification Intr --> int0 ----> eth0
2326 * Default Q0 -----------> int0 ----> eth0
2327 * Default Q1 -----------> int1 ----> eth1
2328 * FreeQ intr -----------> int1 ----> eth1
2330 writel(0xCCFC0FC0, geth
->base
+ GLOBAL_INTERRUPT_SELECT_0_REG
);
2331 writel(0x00F00002, geth
->base
+ GLOBAL_INTERRUPT_SELECT_1_REG
);
2332 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_2_REG
);
2333 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_3_REG
);
2334 writel(0xFF000003, geth
->base
+ GLOBAL_INTERRUPT_SELECT_4_REG
);
2336 /* edge-triggered interrupts packed to level-triggered one... */
2337 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
2338 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
2339 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
2340 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
2341 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2344 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
2345 writel(0, geth
->base
+ GLOBAL_HW_FREEQ_BASE_SIZE_REG
);
2346 writel(0, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
2347 writel(0, geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
);
2349 geth
->freeq_frag_order
= DEFAULT_RX_BUF_ORDER
;
2350 /* This makes the queue resize on probe() so that we
2351 * set up and enable the queue IRQ. FIXME: fragile.
2353 geth
->freeq_order
= 1;
2356 static void gemini_port_save_mac_addr(struct gemini_ethernet_port
*port
)
2359 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD0
));
2361 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD1
));
2363 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD2
));
2366 static int gemini_ethernet_port_probe(struct platform_device
*pdev
)
2368 char *port_names
[2] = { "ethernet0", "ethernet1" };
2369 struct gemini_ethernet_port
*port
;
2370 struct device
*dev
= &pdev
->dev
;
2371 struct gemini_ethernet
*geth
;
2372 struct net_device
*netdev
;
2373 struct resource
*gmacres
;
2374 struct resource
*dmares
;
2375 struct device
*parent
;
2380 parent
= dev
->parent
;
2381 geth
= dev_get_drvdata(parent
);
2383 if (!strcmp(dev_name(dev
), "60008000.ethernet-port"))
2385 else if (!strcmp(dev_name(dev
), "6000c000.ethernet-port"))
2390 dev_info(dev
, "probe %s ID %d\n", dev_name(dev
), id
);
2392 netdev
= alloc_etherdev_mq(sizeof(*port
), TX_QUEUE_NUM
);
2394 dev_err(dev
, "Can't allocate ethernet device #%d\n", id
);
2398 port
= netdev_priv(netdev
);
2399 SET_NETDEV_DEV(netdev
, dev
);
2400 port
->netdev
= netdev
;
2404 port
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
2407 dmares
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2409 dev_err(dev
, "no DMA resource\n");
2412 port
->dma_base
= devm_ioremap_resource(dev
, dmares
);
2413 if (IS_ERR(port
->dma_base
))
2414 return PTR_ERR(port
->dma_base
);
2416 /* GMAC config memory */
2417 gmacres
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2419 dev_err(dev
, "no GMAC resource\n");
2422 port
->gmac_base
= devm_ioremap_resource(dev
, gmacres
);
2423 if (IS_ERR(port
->gmac_base
))
2424 return PTR_ERR(port
->gmac_base
);
2427 irq
= platform_get_irq(pdev
, 0);
2429 dev_err(dev
, "no IRQ\n");
2430 return irq
? irq
: -ENODEV
;
2434 /* Clock the port */
2435 port
->pclk
= devm_clk_get(dev
, "PCLK");
2436 if (IS_ERR(port
->pclk
)) {
2437 dev_err(dev
, "no PCLK\n");
2438 return PTR_ERR(port
->pclk
);
2440 ret
= clk_prepare_enable(port
->pclk
);
2444 /* Maybe there is a nice ethernet address we should use */
2445 gemini_port_save_mac_addr(port
);
2447 /* Reset the port */
2448 port
->reset
= devm_reset_control_get_exclusive(dev
, NULL
);
2449 if (IS_ERR(port
->reset
)) {
2450 dev_err(dev
, "no reset\n");
2451 return PTR_ERR(port
->reset
);
2453 reset_control_reset(port
->reset
);
2454 usleep_range(100, 500);
2456 /* Assign pointer in the main state container */
2462 /* This will just be done once both ports are up and reset */
2463 gemini_ethernet_init(geth
);
2465 platform_set_drvdata(pdev
, port
);
2467 /* Set up and register the netdev */
2468 netdev
->dev_id
= port
->id
;
2470 netdev
->netdev_ops
= &gmac_351x_ops
;
2471 netdev
->ethtool_ops
= &gmac_351x_ethtool_ops
;
2473 spin_lock_init(&port
->config_lock
);
2474 gmac_clear_hw_stats(netdev
);
2476 netdev
->hw_features
= GMAC_OFFLOAD_FEATURES
;
2477 netdev
->features
|= GMAC_OFFLOAD_FEATURES
| NETIF_F_GRO
;
2478 /* We can handle jumbo frames up to 10236 bytes so, let's accept
2479 * payloads of 10236 bytes minus VLAN and ethernet header
2481 netdev
->min_mtu
= ETH_MIN_MTU
;
2482 netdev
->max_mtu
= 10236 - VLAN_ETH_HLEN
;
2484 port
->freeq_refill
= 0;
2485 netif_napi_add(netdev
, &port
->napi
, gmac_napi_poll
,
2486 DEFAULT_NAPI_WEIGHT
);
2488 if (is_valid_ether_addr((void *)port
->mac_addr
)) {
2489 memcpy(netdev
->dev_addr
, port
->mac_addr
, ETH_ALEN
);
2491 dev_dbg(dev
, "ethernet address 0x%08x%08x%08x invalid\n",
2492 port
->mac_addr
[0], port
->mac_addr
[1],
2494 dev_info(dev
, "using a random ethernet address\n");
2495 eth_random_addr(netdev
->dev_addr
);
2497 gmac_write_mac_address(netdev
);
2499 ret
= devm_request_threaded_irq(port
->dev
,
2502 gemini_port_irq_thread
,
2504 port_names
[port
->id
],
2509 ret
= register_netdev(netdev
);
2512 "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
2513 port
->irq
, &dmares
->start
,
2515 ret
= gmac_setup_phy(netdev
);
2518 "PHY init failed, deferring to ifup time\n");
2522 port
->netdev
= NULL
;
2523 free_netdev(netdev
);
/* Tear down one GMAC port child device.
 * All per-port cleanup lives in gemini_port_remove().
 */
static int gemini_ethernet_port_remove(struct platform_device *pdev)
{
	struct gemini_ethernet_port *port;

	port = platform_get_drvdata(pdev);
	gemini_port_remove(port);

	return 0;
}
2535 static const struct of_device_id gemini_ethernet_port_of_match
[] = {
2537 .compatible
= "cortina,gemini-ethernet-port",
2541 MODULE_DEVICE_TABLE(of
, gemini_ethernet_port_of_match
);
2543 static struct platform_driver gemini_ethernet_port_driver
= {
2545 .name
= "gemini-ethernet-port",
2546 .of_match_table
= of_match_ptr(gemini_ethernet_port_of_match
),
2548 .probe
= gemini_ethernet_port_probe
,
2549 .remove
= gemini_ethernet_port_remove
,
2552 static int gemini_ethernet_probe(struct platform_device
*pdev
)
2554 struct device
*dev
= &pdev
->dev
;
2555 struct gemini_ethernet
*geth
;
2556 unsigned int retry
= 5;
2557 struct resource
*res
;
2560 /* Global registers */
2561 geth
= devm_kzalloc(dev
, sizeof(*geth
), GFP_KERNEL
);
2564 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2567 geth
->base
= devm_ioremap_resource(dev
, res
);
2568 if (IS_ERR(geth
->base
))
2569 return PTR_ERR(geth
->base
);
2572 /* Wait for ports to stabilize */
2575 val
= readl(geth
->base
+ GLOBAL_TOE_VERSION_REG
);
2577 } while (!val
&& --retry
);
2579 dev_err(dev
, "failed to reset ethernet\n");
2582 dev_info(dev
, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
2583 (val
>> 4) & 0xFFFU
, val
& 0xFU
);
2585 spin_lock_init(&geth
->irq_lock
);
2586 spin_lock_init(&geth
->freeq_lock
);
2588 /* The children will use this */
2589 platform_set_drvdata(pdev
, geth
);
2591 /* Spawn child devices for the two ports */
2592 return devm_of_platform_populate(dev
);
2595 static int gemini_ethernet_remove(struct platform_device
*pdev
)
2597 struct gemini_ethernet
*geth
= platform_get_drvdata(pdev
);
2599 geth_cleanup_freeq(geth
);
2600 geth
->initialized
= false;
2605 static const struct of_device_id gemini_ethernet_of_match
[] = {
2607 .compatible
= "cortina,gemini-ethernet",
2611 MODULE_DEVICE_TABLE(of
, gemini_ethernet_of_match
);
2613 static struct platform_driver gemini_ethernet_driver
= {
2616 .of_match_table
= of_match_ptr(gemini_ethernet_of_match
),
2618 .probe
= gemini_ethernet_probe
,
2619 .remove
= gemini_ethernet_remove
,
2622 static int __init
gemini_ethernet_module_init(void)
2626 ret
= platform_driver_register(&gemini_ethernet_port_driver
);
2630 ret
= platform_driver_register(&gemini_ethernet_driver
);
2632 platform_driver_unregister(&gemini_ethernet_port_driver
);
2638 module_init(gemini_ethernet_module_init
);
2640 static void __exit
gemini_ethernet_module_exit(void)
2642 platform_driver_unregister(&gemini_ethernet_driver
);
2643 platform_driver_unregister(&gemini_ethernet_port_driver
);
2645 module_exit(gemini_ethernet_module_exit
);
/* Module metadata */
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);