1 // SPDX-License-Identifier: GPL-2.0
2 /* Ethernet device driver for Cortina Systems Gemini SoC
3 * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
4 * Net Engine and Gigabit Ethernet MAC (GMAC)
5 * This hardware contains a TCP Offload Engine (TOE) but currently the
6 * driver does not make use of it.
9 * Linus Walleij <linus.walleij@linaro.org>
10 * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
11 * Michał Mirosław <mirq-linux@rere.qmqm.pl>
12 * Paulius Zaleckas <paulius.zaleckas@gmail.com>
13 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
14 * Gary Chen & Ch Hsu Storlink Semiconductor
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/spinlock.h>
21 #include <linux/slab.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/cache.h>
24 #include <linux/interrupt.h>
25 #include <linux/reset.h>
26 #include <linux/clk.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/of_platform.h>
31 #include <linux/etherdevice.h>
32 #include <linux/if_vlan.h>
33 #include <linux/skbuff.h>
34 #include <linux/phy.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/tcp.h>
38 #include <linux/u64_stats_sync.h>
42 #include <linux/ipv6.h>
46 #define DRV_NAME "gmac-gemini"
48 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
49 static int debug
= -1;
50 module_param(debug
, int, 0);
51 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
57 #define HBURST_SINGLE 0x00
58 #define HBURST_INCR 0x01
59 #define HBURST_INCR4 0x02
60 #define HBURST_INCR8 0x03
62 #define HPROT_DATA_CACHE BIT(0)
63 #define HPROT_PRIVILIGED BIT(1)
64 #define HPROT_BUFFERABLE BIT(2)
65 #define HPROT_CACHABLE BIT(3)
67 #define DEFAULT_RX_COALESCE_NSECS 0
68 #define DEFAULT_GMAC_RXQ_ORDER 9
69 #define DEFAULT_GMAC_TXQ_ORDER 8
70 #define DEFAULT_RX_BUF_ORDER 11
71 #define TX_MAX_FRAGS 16
72 #define TX_QUEUE_NUM 1 /* max: 6 */
73 #define RX_MAX_ALLOC_ORDER 2
75 #define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
76 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
77 #define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
78 GMAC0_SWTQ00_FIN_INT_BIT)
79 #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
81 #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
82 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
83 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
86 * struct gmac_queue_page - page buffer per-page info
87 * @page: the page struct
88 * @mapping: the dma address handle
90 struct gmac_queue_page
{
96 struct gmac_txdesc
*ring
;
99 unsigned int noirq_packets
;
102 struct gemini_ethernet
;
104 struct gemini_ethernet_port
{
107 struct gemini_ethernet
*geth
;
108 struct net_device
*netdev
;
110 void __iomem
*dma_base
;
111 void __iomem
*gmac_base
;
113 struct reset_control
*reset
;
117 void __iomem
*rxq_rwptr
;
118 struct gmac_rxdesc
*rxq_ring
;
119 unsigned int rxq_order
;
121 struct napi_struct napi
;
122 struct hrtimer rx_coalesce_timer
;
123 unsigned int rx_coalesce_nsecs
;
124 unsigned int freeq_refill
;
125 struct gmac_txq txq
[TX_QUEUE_NUM
];
126 unsigned int txq_order
;
127 unsigned int irq_every_tx_packets
;
129 dma_addr_t rxq_dma_base
;
130 dma_addr_t txq_dma_base
;
132 unsigned int msg_enable
;
133 spinlock_t config_lock
; /* Locks config register */
135 struct u64_stats_sync tx_stats_syncp
;
136 struct u64_stats_sync rx_stats_syncp
;
137 struct u64_stats_sync ir_stats_syncp
;
139 struct rtnl_link_stats64 stats
;
140 u64 hw_stats
[RX_STATS_NUM
];
141 u64 rx_stats
[RX_STATUS_NUM
];
142 u64 rx_csum_stats
[RX_CHKSUM_NUM
];
144 u64 tx_frag_stats
[TX_MAX_FRAGS
];
145 u64 tx_frags_linearized
;
149 struct gemini_ethernet
{
152 struct gemini_ethernet_port
*port0
;
153 struct gemini_ethernet_port
*port1
;
156 spinlock_t irq_lock
; /* Locks IRQ-related registers */
157 unsigned int freeq_order
;
158 unsigned int freeq_frag_order
;
159 struct gmac_rxdesc
*freeq_ring
;
160 dma_addr_t freeq_dma_base
;
161 struct gmac_queue_page
*freeq_pages
;
162 unsigned int num_freeq_pages
;
163 spinlock_t freeq_lock
; /* Locks queue from reentrance */
166 #define GMAC_STATS_NUM ( \
167 RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
170 static const char gmac_stats_strings
[GMAC_STATS_NUM
][ETH_GSTRING_LEN
] = {
177 "RX_STATUS_GOOD_FRAME",
178 "RX_STATUS_TOO_LONG_GOOD_CRC",
179 "RX_STATUS_RUNT_FRAME",
180 "RX_STATUS_SFD_NOT_FOUND",
181 "RX_STATUS_CRC_ERROR",
182 "RX_STATUS_TOO_LONG_BAD_CRC",
183 "RX_STATUS_ALIGNMENT_ERROR",
184 "RX_STATUS_TOO_LONG_BAD_ALIGN",
186 "RX_STATUS_DA_FILTERED",
187 "RX_STATUS_BUFFER_FULL",
193 "RX_CHKSUM_IP_UDP_TCP_OK",
194 "RX_CHKSUM_IP_OK_ONLY",
197 "RX_CHKSUM_IP_ERR_UNKNOWN",
199 "RX_CHKSUM_TCP_UDP_ERR",
218 "TX_FRAGS_LINEARIZED",
/* Forward declaration: register dump used from the TX timeout handler */
static void gmac_dump_dma_state(struct net_device *netdev);
224 static void gmac_update_config0_reg(struct net_device
*netdev
,
227 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
231 spin_lock_irqsave(&port
->config_lock
, flags
);
233 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
234 reg
= (reg
& ~vmask
) | val
;
235 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
237 spin_unlock_irqrestore(&port
->config_lock
, flags
);
240 static void gmac_enable_tx_rx(struct net_device
*netdev
)
242 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
246 spin_lock_irqsave(&port
->config_lock
, flags
);
248 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
249 reg
&= ~CONFIG0_TX_RX_DISABLE
;
250 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
252 spin_unlock_irqrestore(&port
->config_lock
, flags
);
255 static void gmac_disable_tx_rx(struct net_device
*netdev
)
257 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
261 spin_lock_irqsave(&port
->config_lock
, flags
);
263 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
264 val
|= CONFIG0_TX_RX_DISABLE
;
265 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
267 spin_unlock_irqrestore(&port
->config_lock
, flags
);
269 mdelay(10); /* let GMAC consume packet */
272 static void gmac_set_flow_control(struct net_device
*netdev
, bool tx
, bool rx
)
274 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
278 spin_lock_irqsave(&port
->config_lock
, flags
);
280 val
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
281 val
&= ~CONFIG0_FLOW_CTL
;
283 val
|= CONFIG0_FLOW_TX
;
285 val
|= CONFIG0_FLOW_RX
;
286 writel(val
, port
->gmac_base
+ GMAC_CONFIG0
);
288 spin_unlock_irqrestore(&port
->config_lock
, flags
);
291 static void gmac_adjust_link(struct net_device
*netdev
)
293 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
294 struct phy_device
*phydev
= netdev
->phydev
;
295 union gmac_status status
, old_status
;
296 bool pause_tx
= false;
297 bool pause_rx
= false;
299 status
.bits32
= readl(port
->gmac_base
+ GMAC_STATUS
);
300 old_status
.bits32
= status
.bits32
;
301 status
.bits
.link
= phydev
->link
;
302 status
.bits
.duplex
= phydev
->duplex
;
304 switch (phydev
->speed
) {
306 status
.bits
.speed
= GMAC_SPEED_1000
;
307 if (phy_interface_mode_is_rgmii(phydev
->interface
))
308 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_1000
;
309 netdev_dbg(netdev
, "connect %s to RGMII @ 1Gbit\n",
310 phydev_name(phydev
));
313 status
.bits
.speed
= GMAC_SPEED_100
;
314 if (phy_interface_mode_is_rgmii(phydev
->interface
))
315 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
316 netdev_dbg(netdev
, "connect %s to RGMII @ 100 Mbit\n",
317 phydev_name(phydev
));
320 status
.bits
.speed
= GMAC_SPEED_10
;
321 if (phy_interface_mode_is_rgmii(phydev
->interface
))
322 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
323 netdev_dbg(netdev
, "connect %s to RGMII @ 10 Mbit\n",
324 phydev_name(phydev
));
327 netdev_warn(netdev
, "Unsupported PHY speed (%d) on %s\n",
328 phydev
->speed
, phydev_name(phydev
));
331 if (phydev
->duplex
== DUPLEX_FULL
) {
332 phy_get_pause(phydev
, &pause_tx
, &pause_rx
);
333 netdev_dbg(netdev
, "set negotiated pause params pause TX = %s, pause RX = %s\n",
334 pause_tx
? "ON" : "OFF", pause_rx
? "ON" : "OFF");
337 gmac_set_flow_control(netdev
, pause_tx
, pause_rx
);
339 if (old_status
.bits32
== status
.bits32
)
342 if (netif_msg_link(port
)) {
343 phy_print_status(phydev
);
344 netdev_info(netdev
, "link flow control: %s\n",
346 ? (phydev
->asym_pause
? "tx" : "both")
347 : (phydev
->asym_pause
? "rx" : "none")
351 gmac_disable_tx_rx(netdev
);
352 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
353 gmac_enable_tx_rx(netdev
);
356 static int gmac_setup_phy(struct net_device
*netdev
)
358 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
359 union gmac_status status
= { .bits32
= 0 };
360 struct device
*dev
= port
->dev
;
361 struct phy_device
*phy
;
363 phy
= of_phy_get_and_connect(netdev
,
368 netdev
->phydev
= phy
;
370 phy_set_max_speed(phy
, SPEED_1000
);
371 phy_support_asym_pause(phy
);
373 /* set PHY interface type */
374 switch (phy
->interface
) {
375 case PHY_INTERFACE_MODE_MII
:
377 "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
378 status
.bits
.mii_rmii
= GMAC_PHY_MII
;
380 case PHY_INTERFACE_MODE_GMII
:
382 "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
383 status
.bits
.mii_rmii
= GMAC_PHY_GMII
;
385 case PHY_INTERFACE_MODE_RGMII
:
386 case PHY_INTERFACE_MODE_RGMII_ID
:
387 case PHY_INTERFACE_MODE_RGMII_TXID
:
388 case PHY_INTERFACE_MODE_RGMII_RXID
:
390 "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
391 status
.bits
.mii_rmii
= GMAC_PHY_RGMII_100_10
;
394 netdev_err(netdev
, "Unsupported MII interface\n");
396 netdev
->phydev
= NULL
;
399 writel(status
.bits32
, port
->gmac_base
+ GMAC_STATUS
);
401 if (netif_msg_link(port
))
402 phy_attached_info(phy
);
407 /* The maximum frame length is not logically enumerated in the
408 * hardware, so we do a table lookup to find the applicable max
411 struct gmac_max_framelen
{
412 unsigned int max_l3_len
;
416 static const struct gmac_max_framelen gmac_maxlens
[] = {
419 .val
= CONFIG0_MAXLEN_1518
,
423 .val
= CONFIG0_MAXLEN_1522
,
427 .val
= CONFIG0_MAXLEN_1536
,
431 .val
= CONFIG0_MAXLEN_1548
,
435 .val
= CONFIG0_MAXLEN_9k
,
439 .val
= CONFIG0_MAXLEN_10k
,
443 static int gmac_pick_rx_max_len(unsigned int max_l3_len
)
445 const struct gmac_max_framelen
*maxlen
;
449 maxtot
= max_l3_len
+ ETH_HLEN
+ VLAN_HLEN
;
451 for (i
= 0; i
< ARRAY_SIZE(gmac_maxlens
); i
++) {
452 maxlen
= &gmac_maxlens
[i
];
453 if (maxtot
<= maxlen
->max_l3_len
)
460 static int gmac_init(struct net_device
*netdev
)
462 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
463 union gmac_config0 config0
= { .bits
= {
474 .port0_chk_classq
= 1,
475 .port1_chk_classq
= 1,
477 union gmac_ahb_weight ahb_weight
= { .bits
= {
482 .tq_dv_threshold
= 0,
484 union gmac_tx_wcr0 hw_weigh
= { .bits
= {
490 union gmac_tx_wcr1 sw_weigh
= { .bits
= {
498 union gmac_config1 config1
= { .bits
= {
502 union gmac_config2 config2
= { .bits
= {
506 union gmac_config3 config3
= { .bits
= {
510 union gmac_config0 tmp
;
512 config0
.bits
.max_len
= gmac_pick_rx_max_len(netdev
->mtu
);
513 tmp
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
514 config0
.bits
.reserved
= tmp
.bits
.reserved
;
515 writel(config0
.bits32
, port
->gmac_base
+ GMAC_CONFIG0
);
516 writel(config1
.bits32
, port
->gmac_base
+ GMAC_CONFIG1
);
517 writel(config2
.bits32
, port
->gmac_base
+ GMAC_CONFIG2
);
518 writel(config3
.bits32
, port
->gmac_base
+ GMAC_CONFIG3
);
520 readl(port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
521 writel(ahb_weight
.bits32
, port
->dma_base
+ GMAC_AHB_WEIGHT_REG
);
523 writel(hw_weigh
.bits32
,
524 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_0_REG
);
525 writel(sw_weigh
.bits32
,
526 port
->dma_base
+ GMAC_TX_WEIGHTING_CTRL_1_REG
);
528 port
->rxq_order
= DEFAULT_GMAC_RXQ_ORDER
;
529 port
->txq_order
= DEFAULT_GMAC_TXQ_ORDER
;
530 port
->rx_coalesce_nsecs
= DEFAULT_RX_COALESCE_NSECS
;
532 /* Mark every quarter of the queue a packet for interrupt
533 * in order to be able to wake up the queue if it was stopped
535 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
540 static int gmac_setup_txqs(struct net_device
*netdev
)
542 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
543 unsigned int n_txq
= netdev
->num_tx_queues
;
544 struct gemini_ethernet
*geth
= port
->geth
;
545 size_t entries
= 1 << port
->txq_order
;
546 struct gmac_txq
*txq
= port
->txq
;
547 struct gmac_txdesc
*desc_ring
;
548 size_t len
= n_txq
* entries
;
549 struct sk_buff
**skb_tab
;
550 void __iomem
*rwptr_reg
;
554 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
556 skb_tab
= kcalloc(len
, sizeof(*skb_tab
), GFP_KERNEL
);
560 desc_ring
= dma_alloc_coherent(geth
->dev
, len
* sizeof(*desc_ring
),
561 &port
->txq_dma_base
, GFP_KERNEL
);
568 if (port
->txq_dma_base
& ~DMA_Q_BASE_MASK
) {
569 dev_warn(geth
->dev
, "TX queue base is not aligned\n");
570 dma_free_coherent(geth
->dev
, len
* sizeof(*desc_ring
),
571 desc_ring
, port
->txq_dma_base
);
576 writel(port
->txq_dma_base
| port
->txq_order
,
577 port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
579 for (i
= 0; i
< n_txq
; i
++) {
580 txq
->ring
= desc_ring
;
582 txq
->noirq_packets
= 0;
584 r
= readw(rwptr_reg
);
586 writew(r
, rwptr_reg
);
591 desc_ring
+= entries
;
598 static void gmac_clean_txq(struct net_device
*netdev
, struct gmac_txq
*txq
,
601 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
602 unsigned int m
= (1 << port
->txq_order
) - 1;
603 struct gemini_ethernet
*geth
= port
->geth
;
604 unsigned int c
= txq
->cptr
;
605 union gmac_txdesc_0 word0
;
606 union gmac_txdesc_1 word1
;
607 unsigned int hwchksum
= 0;
608 unsigned long bytes
= 0;
609 struct gmac_txdesc
*txd
;
610 unsigned short nfrags
;
611 unsigned int errs
= 0;
612 unsigned int pkts
= 0;
623 mapping
= txd
->word2
.buf_adr
;
624 word3
= txd
->word3
.bits32
;
626 dma_unmap_single(geth
->dev
, mapping
,
627 word0
.bits
.buffer_size
, DMA_TO_DEVICE
);
630 dev_kfree_skb(txq
->skb
[c
]);
635 if (!(word3
& SOF_BIT
))
638 if (!word0
.bits
.status_tx_ok
) {
644 bytes
+= txd
->word1
.bits
.byte_count
;
646 if (word1
.bits32
& TSS_CHECKUM_ENABLE
)
649 nfrags
= word0
.bits
.desc_count
- 1;
651 if (nfrags
>= TX_MAX_FRAGS
)
652 nfrags
= TX_MAX_FRAGS
- 1;
654 u64_stats_update_begin(&port
->tx_stats_syncp
);
655 port
->tx_frag_stats
[nfrags
]++;
656 u64_stats_update_end(&port
->tx_stats_syncp
);
660 u64_stats_update_begin(&port
->ir_stats_syncp
);
661 port
->stats
.tx_errors
+= errs
;
662 port
->stats
.tx_packets
+= pkts
;
663 port
->stats
.tx_bytes
+= bytes
;
664 port
->tx_hw_csummed
+= hwchksum
;
665 u64_stats_update_end(&port
->ir_stats_syncp
);
670 static void gmac_cleanup_txqs(struct net_device
*netdev
)
672 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
673 unsigned int n_txq
= netdev
->num_tx_queues
;
674 struct gemini_ethernet
*geth
= port
->geth
;
675 void __iomem
*rwptr_reg
;
678 rwptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
680 for (i
= 0; i
< n_txq
; i
++) {
681 r
= readw(rwptr_reg
);
683 writew(r
, rwptr_reg
);
686 gmac_clean_txq(netdev
, port
->txq
+ i
, r
);
688 writel(0, port
->dma_base
+ GMAC_SW_TX_QUEUE_BASE_REG
);
690 kfree(port
->txq
->skb
);
691 dma_free_coherent(geth
->dev
,
692 n_txq
* sizeof(*port
->txq
->ring
) << port
->txq_order
,
693 port
->txq
->ring
, port
->txq_dma_base
);
696 static int gmac_setup_rxq(struct net_device
*netdev
)
698 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
699 struct gemini_ethernet
*geth
= port
->geth
;
700 struct nontoe_qhdr __iomem
*qhdr
;
702 qhdr
= geth
->base
+ TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
703 port
->rxq_rwptr
= &qhdr
->word1
;
705 /* Remap a slew of memory to use for the RX queue */
706 port
->rxq_ring
= dma_alloc_coherent(geth
->dev
,
707 sizeof(*port
->rxq_ring
) << port
->rxq_order
,
708 &port
->rxq_dma_base
, GFP_KERNEL
);
711 if (port
->rxq_dma_base
& ~NONTOE_QHDR0_BASE_MASK
) {
712 dev_warn(geth
->dev
, "RX queue base is not aligned\n");
716 writel(port
->rxq_dma_base
| port
->rxq_order
, &qhdr
->word0
);
717 writel(0, port
->rxq_rwptr
);
721 static struct gmac_queue_page
*
722 gmac_get_queue_page(struct gemini_ethernet
*geth
,
723 struct gemini_ethernet_port
*port
,
726 struct gmac_queue_page
*gpage
;
730 /* Only look for even pages */
731 mapping
= addr
& PAGE_MASK
;
733 if (!geth
->freeq_pages
) {
734 dev_err(geth
->dev
, "try to get page with no page list\n");
738 /* Look up a ring buffer page from virtual mapping */
739 for (i
= 0; i
< geth
->num_freeq_pages
; i
++) {
740 gpage
= &geth
->freeq_pages
[i
];
741 if (gpage
->mapping
== mapping
)
748 static void gmac_cleanup_rxq(struct net_device
*netdev
)
750 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
751 struct gemini_ethernet
*geth
= port
->geth
;
752 struct gmac_rxdesc
*rxd
= port
->rxq_ring
;
753 static struct gmac_queue_page
*gpage
;
754 struct nontoe_qhdr __iomem
*qhdr
;
755 void __iomem
*dma_reg
;
756 void __iomem
*ptr_reg
;
762 TOE_DEFAULT_Q_HDR_BASE(netdev
->dev_id
);
763 dma_reg
= &qhdr
->word0
;
764 ptr_reg
= &qhdr
->word1
;
766 rw
.bits32
= readl(ptr_reg
);
769 writew(r
, ptr_reg
+ 2);
773 /* Loop from read pointer to write pointer of the RX queue
774 * and free up all pages by the queue.
777 mapping
= rxd
[r
].word2
.buf_adr
;
779 r
&= ((1 << port
->rxq_order
) - 1);
784 /* Freeq pointers are one page off */
785 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
787 dev_err(geth
->dev
, "could not find page\n");
790 /* Release the RX queue reference to the page */
791 put_page(gpage
->page
);
794 dma_free_coherent(geth
->dev
, sizeof(*port
->rxq_ring
) << port
->rxq_order
,
795 port
->rxq_ring
, port
->rxq_dma_base
);
798 static struct page
*geth_freeq_alloc_map_page(struct gemini_ethernet
*geth
,
801 struct gmac_rxdesc
*freeq_entry
;
802 struct gmac_queue_page
*gpage
;
803 unsigned int fpp_order
;
804 unsigned int frag_len
;
809 /* First allocate and DMA map a single page */
810 page
= alloc_page(GFP_ATOMIC
);
814 mapping
= dma_map_single(geth
->dev
, page_address(page
),
815 PAGE_SIZE
, DMA_FROM_DEVICE
);
816 if (dma_mapping_error(geth
->dev
, mapping
)) {
821 /* The assign the page mapping (physical address) to the buffer address
822 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
823 * 4k), and the default RX frag order is 11 (fragments are up 20 2048
824 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
825 * each page normally needs two entries in the queue.
827 frag_len
= 1 << geth
->freeq_frag_order
; /* Usually 2048 */
828 fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
829 freeq_entry
= geth
->freeq_ring
+ (pn
<< fpp_order
);
830 dev_dbg(geth
->dev
, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
831 pn
, frag_len
, (1 << fpp_order
), freeq_entry
);
832 for (i
= (1 << fpp_order
); i
> 0; i
--) {
833 freeq_entry
->word2
.buf_adr
= mapping
;
838 /* If the freeq entry already has a page mapped, then unmap it. */
839 gpage
= &geth
->freeq_pages
[pn
];
841 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
842 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
843 /* This should be the last reference to the page so it gets
846 put_page(gpage
->page
);
849 /* Then put our new mapping into the page table */
850 dev_dbg(geth
->dev
, "page %d, DMA addr: %08x, page %p\n",
851 pn
, (unsigned int)mapping
, page
);
852 gpage
->mapping
= mapping
;
859 * geth_fill_freeq() - Fill the freeq with empty fragments to use
860 * @geth: the ethernet adapter
861 * @refill: whether to reset the queue by filling in all freeq entries or
862 * just refill it, usually the interrupt to refill the queue happens when
863 * the queue is half empty.
865 static unsigned int geth_fill_freeq(struct gemini_ethernet
*geth
, bool refill
)
867 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
868 unsigned int count
= 0;
869 unsigned int pn
, epn
;
875 m_pn
= (1 << (geth
->freeq_order
- fpp_order
)) - 1;
877 spin_lock_irqsave(&geth
->freeq_lock
, flags
);
879 rw
.bits32
= readl(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
880 pn
= (refill
? rw
.bits
.wptr
: rw
.bits
.rptr
) >> fpp_order
;
881 epn
= (rw
.bits
.rptr
>> fpp_order
) - 1;
884 /* Loop over the freeq ring buffer entries */
886 struct gmac_queue_page
*gpage
;
889 gpage
= &geth
->freeq_pages
[pn
];
892 dev_dbg(geth
->dev
, "fill entry %d page ref count %d add %d refs\n",
893 pn
, page_ref_count(page
), 1 << fpp_order
);
895 if (page_ref_count(page
) > 1) {
896 unsigned int fl
= (pn
- epn
) & m_pn
;
898 if (fl
> 64 >> fpp_order
)
901 page
= geth_freeq_alloc_map_page(geth
, pn
);
906 /* Add one reference per fragment in the page */
907 page_ref_add(page
, 1 << fpp_order
);
908 count
+= 1 << fpp_order
;
913 writew(pn
<< fpp_order
, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
915 spin_unlock_irqrestore(&geth
->freeq_lock
, flags
);
920 static int geth_setup_freeq(struct gemini_ethernet
*geth
)
922 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
923 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
924 unsigned int len
= 1 << geth
->freeq_order
;
925 unsigned int pages
= len
>> fpp_order
;
926 union queue_threshold qt
;
927 union dma_skb_size skbsz
;
931 geth
->freeq_ring
= dma_alloc_coherent(geth
->dev
,
932 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
933 &geth
->freeq_dma_base
, GFP_KERNEL
);
934 if (!geth
->freeq_ring
)
936 if (geth
->freeq_dma_base
& ~DMA_Q_BASE_MASK
) {
937 dev_warn(geth
->dev
, "queue ring base is not aligned\n");
941 /* Allocate a mapping to page look-up index */
942 geth
->freeq_pages
= kcalloc(pages
, sizeof(*geth
->freeq_pages
),
944 if (!geth
->freeq_pages
)
946 geth
->num_freeq_pages
= pages
;
948 dev_info(geth
->dev
, "allocate %d pages for queue\n", pages
);
949 for (pn
= 0; pn
< pages
; pn
++)
950 if (!geth_freeq_alloc_map_page(geth
, pn
))
951 goto err_freeq_alloc
;
953 filled
= geth_fill_freeq(geth
, false);
955 goto err_freeq_alloc
;
957 qt
.bits32
= readl(geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
958 qt
.bits
.swfq_empty
= 32;
959 writel(qt
.bits32
, geth
->base
+ GLOBAL_QUEUE_THRESHOLD_REG
);
961 skbsz
.bits
.sw_skb_size
= 1 << geth
->freeq_frag_order
;
962 writel(skbsz
.bits32
, geth
->base
+ GLOBAL_DMA_SKB_SIZE_REG
);
963 writel(geth
->freeq_dma_base
| geth
->freeq_order
,
964 geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
970 struct gmac_queue_page
*gpage
;
974 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
975 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
976 gpage
= &geth
->freeq_pages
[pn
];
977 put_page(gpage
->page
);
980 kfree(geth
->freeq_pages
);
982 dma_free_coherent(geth
->dev
,
983 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
984 geth
->freeq_ring
, geth
->freeq_dma_base
);
985 geth
->freeq_ring
= NULL
;
990 * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
991 * @geth: the Gemini global ethernet state
993 static void geth_cleanup_freeq(struct gemini_ethernet
*geth
)
995 unsigned int fpp_order
= PAGE_SHIFT
- geth
->freeq_frag_order
;
996 unsigned int frag_len
= 1 << geth
->freeq_frag_order
;
997 unsigned int len
= 1 << geth
->freeq_order
;
998 unsigned int pages
= len
>> fpp_order
;
1001 writew(readw(geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
),
1002 geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
+ 2);
1003 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
1005 for (pn
= 0; pn
< pages
; pn
++) {
1006 struct gmac_queue_page
*gpage
;
1009 mapping
= geth
->freeq_ring
[pn
<< fpp_order
].word2
.buf_adr
;
1010 dma_unmap_single(geth
->dev
, mapping
, frag_len
, DMA_FROM_DEVICE
);
1012 gpage
= &geth
->freeq_pages
[pn
];
1013 while (page_ref_count(gpage
->page
) > 0)
1014 put_page(gpage
->page
);
1017 kfree(geth
->freeq_pages
);
1019 dma_free_coherent(geth
->dev
,
1020 sizeof(*geth
->freeq_ring
) << geth
->freeq_order
,
1021 geth
->freeq_ring
, geth
->freeq_dma_base
);
1025 * geth_resize_freeq() - resize the software queue depth
1026 * @port: the port requesting the change
1028 * This gets called at least once during probe() so the device queue gets
1029 * "resized" from the hardware defaults. Since both ports/net devices share
1030 * the same hardware queue, some synchronization between the ports is
1033 static int geth_resize_freeq(struct gemini_ethernet_port
*port
)
1035 struct gemini_ethernet
*geth
= port
->geth
;
1036 struct net_device
*netdev
= port
->netdev
;
1037 struct gemini_ethernet_port
*other_port
;
1038 struct net_device
*other_netdev
;
1039 unsigned int new_size
= 0;
1040 unsigned int new_order
;
1041 unsigned long flags
;
1045 if (netdev
->dev_id
== 0)
1046 other_netdev
= geth
->port1
->netdev
;
1048 other_netdev
= geth
->port0
->netdev
;
1050 if (other_netdev
&& netif_running(other_netdev
))
1053 new_size
= 1 << (port
->rxq_order
+ 1);
1054 netdev_dbg(netdev
, "port %d size: %d order %d\n",
1059 other_port
= netdev_priv(other_netdev
);
1060 new_size
+= 1 << (other_port
->rxq_order
+ 1);
1061 netdev_dbg(other_netdev
, "port %d size: %d order %d\n",
1062 other_netdev
->dev_id
,
1063 (1 << (other_port
->rxq_order
+ 1)),
1064 other_port
->rxq_order
);
1067 new_order
= min(15, ilog2(new_size
- 1) + 1);
1068 dev_dbg(geth
->dev
, "set shared queue to size %d order %d\n",
1069 new_size
, new_order
);
1070 if (geth
->freeq_order
== new_order
)
1073 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1075 /* Disable the software queue IRQs */
1076 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1077 en
&= ~SWFQ_EMPTY_INT_BIT
;
1078 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1079 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1081 /* Drop the old queue */
1082 if (geth
->freeq_ring
)
1083 geth_cleanup_freeq(geth
);
1085 /* Allocate a new queue with the desired order */
1086 geth
->freeq_order
= new_order
;
1087 ret
= geth_setup_freeq(geth
);
1089 /* Restart the interrupts - NOTE if this is the first resize
1090 * after probe(), this is where the interrupts get turned on
1091 * in the first place.
1093 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1094 en
|= SWFQ_EMPTY_INT_BIT
;
1095 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1096 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1101 static void gmac_tx_irq_enable(struct net_device
*netdev
,
1102 unsigned int txq
, int en
)
1104 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1105 struct gemini_ethernet
*geth
= port
->geth
;
1106 unsigned long flags
;
1109 netdev_dbg(netdev
, "%s device %d\n", __func__
, netdev
->dev_id
);
1111 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1113 mask
= GMAC0_IRQ0_TXQ0_INTS
<< (6 * netdev
->dev_id
+ txq
);
1116 writel(mask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1118 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1119 val
= en
? val
| mask
: val
& ~mask
;
1120 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1122 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
/* TX completion interrupt for one queue: mask further interrupts for the
 * queue and wake the corresponding netdev TX queue so the stack resumes
 * transmission.
 */
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
{
	struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);

	gmac_tx_irq_enable(netdev, txq_num, 0);
	netif_tx_wake_queue(ntxq);
}
1133 static int gmac_map_tx_bufs(struct net_device
*netdev
, struct sk_buff
*skb
,
1134 struct gmac_txq
*txq
, unsigned short *desc
)
1136 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1137 struct skb_shared_info
*skb_si
= skb_shinfo(skb
);
1138 unsigned short m
= (1 << port
->txq_order
) - 1;
1139 short frag
, last_frag
= skb_si
->nr_frags
- 1;
1140 struct gemini_ethernet
*geth
= port
->geth
;
1141 unsigned int word1
, word3
, buflen
;
1142 unsigned short w
= *desc
;
1143 struct gmac_txdesc
*txd
;
1144 skb_frag_t
*skb_frag
;
1153 mss
= skb_shinfo(skb
)->gso_size
;
1155 /* This means we are dealing with TCP and skb->len is the
1156 * sum total of all the segments. The TSO will deal with
1157 * chopping this up for us.
1159 /* The accelerator needs the full frame size here */
1160 mss
+= skb_tcp_all_headers(skb
);
1161 netdev_dbg(netdev
, "segment offloading mss = %04x len=%04x\n",
1163 word1
|= TSS_MTU_ENABLE_BIT
;
1165 } else if (skb
->len
>= ETH_FRAME_LEN
) {
1166 /* Hardware offloaded checksumming isn't working on frames
1167 * bigger than 1514 bytes. A hypothesis about this is that the
1168 * checksum buffer is only 1518 bytes, so when the frames get
1169 * bigger they get truncated, or the last few bytes get
1170 * overwritten by the FCS.
1172 * Just use software checksumming and bypass on bigger frames.
1174 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1175 ret
= skb_checksum_help(skb
);
1179 word1
|= TSS_BYPASS_BIT
;
1182 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1185 /* We do not switch off the checksumming on non TCP/UDP
1186 * frames: as is shown from tests, the checksumming engine
1187 * is smart enough to see that a frame is not actually TCP
1188 * or UDP and then just pass it through without any changes
1191 if (skb
->protocol
== htons(ETH_P_IP
)) {
1192 word1
|= TSS_IP_CHKSUM_BIT
;
1193 tcp
= ip_hdr(skb
)->protocol
== IPPROTO_TCP
;
1195 word1
|= TSS_IPV6_ENABLE_BIT
;
1196 tcp
= ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
;
1199 word1
|= tcp
? TSS_TCP_CHKSUM_BIT
: TSS_UDP_CHKSUM_BIT
;
1203 while (frag
<= last_frag
) {
1206 buflen
= skb_headlen(skb
);
1208 skb_frag
= skb_si
->frags
+ frag
;
1209 buffer
= skb_frag_address(skb_frag
);
1210 buflen
= skb_frag_size(skb_frag
);
1213 if (frag
== last_frag
) {
1218 mapping
= dma_map_single(geth
->dev
, buffer
, buflen
,
1220 if (dma_mapping_error(geth
->dev
, mapping
))
1223 txd
= txq
->ring
+ w
;
1224 txd
->word0
.bits32
= buflen
;
1225 txd
->word1
.bits32
= word1
;
1226 txd
->word2
.buf_adr
= mapping
;
1227 txd
->word3
.bits32
= word3
;
1229 word3
&= MTU_SIZE_BIT_MASK
;
1239 while (w
!= *desc
) {
1243 dma_unmap_page(geth
->dev
, txq
->ring
[w
].word2
.buf_adr
,
1244 txq
->ring
[w
].word0
.bits
.buffer_size
,
1250 static netdev_tx_t
gmac_start_xmit(struct sk_buff
*skb
,
1251 struct net_device
*netdev
)
1253 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1254 unsigned short m
= (1 << port
->txq_order
) - 1;
1255 struct netdev_queue
*ntxq
;
1256 unsigned short r
, w
, d
;
1257 void __iomem
*ptr_reg
;
1258 struct gmac_txq
*txq
;
1259 int txq_num
, nfrags
;
1262 if (skb
->len
>= 0x10000)
1265 txq_num
= skb_get_queue_mapping(skb
);
1266 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE_PTR_REG(txq_num
);
1267 txq
= &port
->txq
[txq_num
];
1268 ntxq
= netdev_get_tx_queue(netdev
, txq_num
);
1269 nfrags
= skb_shinfo(skb
)->nr_frags
;
1271 rw
.bits32
= readl(ptr_reg
);
1275 d
= txq
->cptr
- w
- 1;
1278 if (d
< nfrags
+ 2) {
1279 gmac_clean_txq(netdev
, txq
, r
);
1280 d
= txq
->cptr
- w
- 1;
1283 if (d
< nfrags
+ 2) {
1284 netif_tx_stop_queue(ntxq
);
1286 d
= txq
->cptr
+ nfrags
+ 16;
1288 txq
->ring
[d
].word3
.bits
.eofie
= 1;
1289 gmac_tx_irq_enable(netdev
, txq_num
, 1);
1291 u64_stats_update_begin(&port
->tx_stats_syncp
);
1292 netdev
->stats
.tx_fifo_errors
++;
1293 u64_stats_update_end(&port
->tx_stats_syncp
);
1294 return NETDEV_TX_BUSY
;
1298 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
)) {
1299 if (skb_linearize(skb
))
1302 u64_stats_update_begin(&port
->tx_stats_syncp
);
1303 port
->tx_frags_linearized
++;
1304 u64_stats_update_end(&port
->tx_stats_syncp
);
1306 if (gmac_map_tx_bufs(netdev
, skb
, txq
, &w
))
1310 writew(w
, ptr_reg
+ 2);
1312 gmac_clean_txq(netdev
, txq
, r
);
1313 return NETDEV_TX_OK
;
1318 u64_stats_update_begin(&port
->tx_stats_syncp
);
1319 port
->stats
.tx_dropped
++;
1320 u64_stats_update_end(&port
->tx_stats_syncp
);
1321 return NETDEV_TX_OK
;
/* ndo_tx_timeout hook: log the stall and dump the DMA engine state to
 * aid debugging. No recovery is attempted here.
 */
static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	netdev_err(netdev, "Tx timeout\n");
	gmac_dump_dma_state(netdev);
}
1330 static void gmac_enable_irq(struct net_device
*netdev
, int enable
)
1332 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1333 struct gemini_ethernet
*geth
= port
->geth
;
1334 unsigned long flags
;
1337 netdev_dbg(netdev
, "%s device %d %s\n", __func__
,
1338 netdev
->dev_id
, enable
? "enable" : "disable");
1339 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1341 mask
= GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2);
1342 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1343 val
= enable
? (val
| mask
) : (val
& ~mask
);
1344 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1346 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1347 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1348 val
= enable
? (val
| mask
) : (val
& ~mask
);
1349 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1351 mask
= GMAC0_IRQ4_8
<< (netdev
->dev_id
* 8);
1352 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1353 val
= enable
? (val
| mask
) : (val
& ~mask
);
1354 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1356 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1359 static void gmac_enable_rx_irq(struct net_device
*netdev
, int enable
)
1361 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1362 struct gemini_ethernet
*geth
= port
->geth
;
1363 unsigned long flags
;
1366 netdev_dbg(netdev
, "%s device %d %s\n", __func__
, netdev
->dev_id
,
1367 enable
? "enable" : "disable");
1368 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1369 mask
= DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
;
1371 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1372 val
= enable
? (val
| mask
) : (val
& ~mask
);
1373 writel(val
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1375 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1378 static struct sk_buff
*gmac_skb_if_good_frame(struct gemini_ethernet_port
*port
,
1379 union gmac_rxdesc_0 word0
,
1380 unsigned int frame_len
)
1382 unsigned int rx_csum
= word0
.bits
.chksum_status
;
1383 unsigned int rx_status
= word0
.bits
.status
;
1384 struct sk_buff
*skb
= NULL
;
1386 port
->rx_stats
[rx_status
]++;
1387 port
->rx_csum_stats
[rx_csum
]++;
1389 if (word0
.bits
.derr
|| word0
.bits
.perr
||
1390 rx_status
|| frame_len
< ETH_ZLEN
||
1391 rx_csum
>= RX_CHKSUM_IP_ERR_UNKNOWN
) {
1392 port
->stats
.rx_errors
++;
1394 if (frame_len
< ETH_ZLEN
|| RX_ERROR_LENGTH(rx_status
))
1395 port
->stats
.rx_length_errors
++;
1396 if (RX_ERROR_OVER(rx_status
))
1397 port
->stats
.rx_over_errors
++;
1398 if (RX_ERROR_CRC(rx_status
))
1399 port
->stats
.rx_crc_errors
++;
1400 if (RX_ERROR_FRAME(rx_status
))
1401 port
->stats
.rx_frame_errors
++;
1405 skb
= napi_get_frags(&port
->napi
);
1409 if (rx_csum
== RX_CHKSUM_IP_UDP_TCP_OK
)
1410 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1413 port
->stats
.rx_bytes
+= frame_len
;
1414 port
->stats
.rx_packets
++;
1418 static unsigned int gmac_rx(struct net_device
*netdev
, unsigned int budget
)
1420 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1421 unsigned short m
= (1 << port
->rxq_order
) - 1;
1422 struct gemini_ethernet
*geth
= port
->geth
;
1423 void __iomem
*ptr_reg
= port
->rxq_rwptr
;
1424 unsigned int frame_len
, frag_len
;
1425 struct gmac_rxdesc
*rx
= NULL
;
1426 struct gmac_queue_page
*gpage
;
1427 static struct sk_buff
*skb
;
1428 union gmac_rxdesc_0 word0
;
1429 union gmac_rxdesc_1 word1
;
1430 union gmac_rxdesc_3 word3
;
1431 struct page
*page
= NULL
;
1432 unsigned int page_offs
;
1433 unsigned long flags
;
1434 unsigned short r
, w
;
1439 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1440 rw
.bits32
= readl(ptr_reg
);
1441 /* Reset interrupt as all packages until here are taken into account */
1442 writel(DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
,
1443 geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1444 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1449 while (budget
&& w
!= r
) {
1450 rx
= port
->rxq_ring
+ r
;
1453 mapping
= rx
->word2
.buf_adr
;
1459 frag_len
= word0
.bits
.buffer_size
;
1460 frame_len
= word1
.bits
.byte_count
;
1461 page_offs
= mapping
& ~PAGE_MASK
;
1465 "rxq[%u]: HW BUG: zero DMA desc\n", r
);
1469 /* Freeq pointers are one page off */
1470 gpage
= gmac_get_queue_page(geth
, port
, mapping
+ PAGE_SIZE
);
1472 dev_err(geth
->dev
, "could not find mapping\n");
1477 if (word3
.bits32
& SOF_BIT
) {
1479 napi_free_frags(&port
->napi
);
1480 port
->stats
.rx_dropped
++;
1483 skb
= gmac_skb_if_good_frame(port
, word0
, frame_len
);
1487 page_offs
+= NET_IP_ALIGN
;
1488 frag_len
-= NET_IP_ALIGN
;
1496 if (word3
.bits32
& EOF_BIT
)
1497 frag_len
= frame_len
- skb
->len
;
1499 /* append page frag to skb */
1500 if (frag_nr
== MAX_SKB_FRAGS
)
1504 netdev_err(netdev
, "Received fragment with len = 0\n");
1506 skb_fill_page_desc(skb
, frag_nr
, page
, page_offs
, frag_len
);
1507 skb
->len
+= frag_len
;
1508 skb
->data_len
+= frag_len
;
1509 skb
->truesize
+= frag_len
;
1512 if (word3
.bits32
& EOF_BIT
) {
1513 napi_gro_frags(&port
->napi
);
1521 napi_free_frags(&port
->napi
);
1528 port
->stats
.rx_dropped
++;
1535 static int gmac_napi_poll(struct napi_struct
*napi
, int budget
)
1537 struct gemini_ethernet_port
*port
= netdev_priv(napi
->dev
);
1538 struct gemini_ethernet
*geth
= port
->geth
;
1539 unsigned int freeq_threshold
;
1540 unsigned int received
;
1542 freeq_threshold
= 1 << (geth
->freeq_order
- 1);
1543 u64_stats_update_begin(&port
->rx_stats_syncp
);
1545 received
= gmac_rx(napi
->dev
, budget
);
1546 if (received
< budget
) {
1547 napi_gro_flush(napi
, false);
1548 napi_complete_done(napi
, received
);
1549 gmac_enable_rx_irq(napi
->dev
, 1);
1550 ++port
->rx_napi_exits
;
1553 port
->freeq_refill
+= (budget
- received
);
1554 if (port
->freeq_refill
> freeq_threshold
) {
1555 port
->freeq_refill
-= freeq_threshold
;
1556 geth_fill_freeq(geth
, true);
1559 u64_stats_update_end(&port
->rx_stats_syncp
);
1563 static void gmac_dump_dma_state(struct net_device
*netdev
)
1565 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1566 struct gemini_ethernet
*geth
= port
->geth
;
1567 void __iomem
*ptr_reg
;
1570 /* Interrupt status */
1571 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
1572 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
1573 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
1574 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
1575 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1576 netdev_err(netdev
, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1577 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1579 /* Interrupt enable */
1580 reg
[0] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
1581 reg
[1] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
1582 reg
[2] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
1583 reg
[3] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
1584 reg
[4] = readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
1585 netdev_err(netdev
, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1586 reg
[0], reg
[1], reg
[2], reg
[3], reg
[4]);
1589 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_FIRST_DESC_REG
);
1590 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_CURR_DESC_REG
);
1591 reg
[2] = GET_RPTR(port
->rxq_rwptr
);
1592 reg
[3] = GET_WPTR(port
->rxq_rwptr
);
1593 netdev_err(netdev
, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1594 reg
[0], reg
[1], reg
[2], reg
[3]);
1596 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD0_REG
);
1597 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD1_REG
);
1598 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD2_REG
);
1599 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_RX_DESC_WORD3_REG
);
1600 netdev_err(netdev
, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1601 reg
[0], reg
[1], reg
[2], reg
[3]);
1604 ptr_reg
= port
->dma_base
+ GMAC_SW_TX_QUEUE0_PTR_REG
;
1606 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_FIRST_DESC_REG
);
1607 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_CURR_DESC_REG
);
1608 reg
[2] = GET_RPTR(ptr_reg
);
1609 reg
[3] = GET_WPTR(ptr_reg
);
1610 netdev_err(netdev
, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
1611 reg
[0], reg
[1], reg
[2], reg
[3]);
1613 reg
[0] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD0_REG
);
1614 reg
[1] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD1_REG
);
1615 reg
[2] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD2_REG
);
1616 reg
[3] = readl(port
->dma_base
+ GMAC_DMA_TX_DESC_WORD3_REG
);
1617 netdev_err(netdev
, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1618 reg
[0], reg
[1], reg
[2], reg
[3]);
1620 /* FREE queues status */
1621 ptr_reg
= geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
;
1623 reg
[0] = GET_RPTR(ptr_reg
);
1624 reg
[1] = GET_WPTR(ptr_reg
);
1626 ptr_reg
= geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
;
1628 reg
[2] = GET_RPTR(ptr_reg
);
1629 reg
[3] = GET_WPTR(ptr_reg
);
1630 netdev_err(netdev
, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
1631 reg
[0], reg
[1], reg
[2], reg
[3]);
1634 static void gmac_update_hw_stats(struct net_device
*netdev
)
1636 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1637 unsigned int rx_discards
, rx_mcast
, rx_bcast
;
1638 struct gemini_ethernet
*geth
= port
->geth
;
1639 unsigned long flags
;
1641 spin_lock_irqsave(&geth
->irq_lock
, flags
);
1642 u64_stats_update_begin(&port
->ir_stats_syncp
);
1644 rx_discards
= readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1645 port
->hw_stats
[0] += rx_discards
;
1646 port
->hw_stats
[1] += readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1647 rx_mcast
= readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1648 port
->hw_stats
[2] += rx_mcast
;
1649 rx_bcast
= readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1650 port
->hw_stats
[3] += rx_bcast
;
1651 port
->hw_stats
[4] += readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1652 port
->hw_stats
[5] += readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1654 port
->stats
.rx_missed_errors
+= rx_discards
;
1655 port
->stats
.multicast
+= rx_mcast
;
1656 port
->stats
.multicast
+= rx_bcast
;
1658 writel(GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8),
1659 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1661 u64_stats_update_end(&port
->ir_stats_syncp
);
1662 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
1666 * gmac_get_intr_flags() - get interrupt status flags for a port from
1667 * @netdev: the net device for the port to get flags from
1668 * @i: the interrupt status register 0..4
1670 static u32
gmac_get_intr_flags(struct net_device
*netdev
, int i
)
1672 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1673 struct gemini_ethernet
*geth
= port
->geth
;
1674 void __iomem
*irqif_reg
, *irqen_reg
;
1675 unsigned int offs
, val
;
1677 /* Calculate the offset using the stride of the status registers */
1678 offs
= i
* (GLOBAL_INTERRUPT_STATUS_1_REG
-
1679 GLOBAL_INTERRUPT_STATUS_0_REG
);
1681 irqif_reg
= geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
+ offs
;
1682 irqen_reg
= geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
+ offs
;
1684 val
= readl(irqif_reg
) & readl(irqen_reg
);
1688 static enum hrtimer_restart
gmac_coalesce_delay_expired(struct hrtimer
*timer
)
1690 struct gemini_ethernet_port
*port
=
1691 container_of(timer
, struct gemini_ethernet_port
,
1694 napi_schedule(&port
->napi
);
1695 return HRTIMER_NORESTART
;
1698 static irqreturn_t
gmac_irq(int irq
, void *data
)
1700 struct gemini_ethernet_port
*port
;
1701 struct net_device
*netdev
= data
;
1702 struct gemini_ethernet
*geth
;
1705 port
= netdev_priv(netdev
);
1708 val
= gmac_get_intr_flags(netdev
, 0);
1711 if (val
& (GMAC0_IRQ0_2
<< (netdev
->dev_id
* 2))) {
1713 netdev_err(netdev
, "hw failure/sw bug\n");
1714 gmac_dump_dma_state(netdev
);
1716 /* don't know how to recover, just reduce losses */
1717 gmac_enable_irq(netdev
, 0);
1721 if (val
& (GMAC0_IRQ0_TXQ0_INTS
<< (netdev
->dev_id
* 6)))
1722 gmac_tx_irq(netdev
, 0);
1724 val
= gmac_get_intr_flags(netdev
, 1);
1727 if (val
& (DEFAULT_Q0_INT_BIT
<< netdev
->dev_id
)) {
1728 gmac_enable_rx_irq(netdev
, 0);
1730 if (!port
->rx_coalesce_nsecs
) {
1731 napi_schedule(&port
->napi
);
1735 ktime
= ktime_set(0, port
->rx_coalesce_nsecs
);
1736 hrtimer_start(&port
->rx_coalesce_timer
, ktime
,
1741 val
= gmac_get_intr_flags(netdev
, 4);
1744 if (val
& (GMAC0_MIB_INT_BIT
<< (netdev
->dev_id
* 8)))
1745 gmac_update_hw_stats(netdev
);
1747 if (val
& (GMAC0_RX_OVERRUN_INT_BIT
<< (netdev
->dev_id
* 8))) {
1748 spin_lock(&geth
->irq_lock
);
1749 writel(GMAC0_RXDERR_INT_BIT
<< (netdev
->dev_id
* 8),
1750 geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
1751 u64_stats_update_begin(&port
->ir_stats_syncp
);
1752 ++port
->stats
.rx_fifo_errors
;
1753 u64_stats_update_end(&port
->ir_stats_syncp
);
1754 spin_unlock(&geth
->irq_lock
);
1757 return orr
? IRQ_HANDLED
: IRQ_NONE
;
1760 static void gmac_start_dma(struct gemini_ethernet_port
*port
)
1762 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1763 union gmac_dma_ctrl dma_ctrl
;
1765 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1766 dma_ctrl
.bits
.rd_enable
= 1;
1767 dma_ctrl
.bits
.td_enable
= 1;
1768 dma_ctrl
.bits
.loopback
= 0;
1769 dma_ctrl
.bits
.drop_small_ack
= 0;
1770 dma_ctrl
.bits
.rd_insert_bytes
= NET_IP_ALIGN
;
1771 dma_ctrl
.bits
.rd_prot
= HPROT_DATA_CACHE
| HPROT_PRIVILIGED
;
1772 dma_ctrl
.bits
.rd_burst_size
= HBURST_INCR8
;
1773 dma_ctrl
.bits
.rd_bus
= HSIZE_8
;
1774 dma_ctrl
.bits
.td_prot
= HPROT_DATA_CACHE
;
1775 dma_ctrl
.bits
.td_burst_size
= HBURST_INCR8
;
1776 dma_ctrl
.bits
.td_bus
= HSIZE_8
;
1778 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1781 static void gmac_stop_dma(struct gemini_ethernet_port
*port
)
1783 void __iomem
*dma_ctrl_reg
= port
->dma_base
+ GMAC_DMA_CTRL_REG
;
1784 union gmac_dma_ctrl dma_ctrl
;
1786 dma_ctrl
.bits32
= readl(dma_ctrl_reg
);
1787 dma_ctrl
.bits
.rd_enable
= 0;
1788 dma_ctrl
.bits
.td_enable
= 0;
1789 writel(dma_ctrl
.bits32
, dma_ctrl_reg
);
1792 static int gmac_open(struct net_device
*netdev
)
1794 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1797 err
= request_irq(netdev
->irq
, gmac_irq
,
1798 IRQF_SHARED
, netdev
->name
, netdev
);
1800 netdev_err(netdev
, "no IRQ\n");
1804 netif_carrier_off(netdev
);
1805 phy_start(netdev
->phydev
);
1807 err
= geth_resize_freeq(port
);
1808 /* It's fine if it's just busy, the other port has set up
1809 * the freeq in that case.
1811 if (err
&& (err
!= -EBUSY
)) {
1812 netdev_err(netdev
, "could not resize freeq\n");
1816 err
= gmac_setup_rxq(netdev
);
1818 netdev_err(netdev
, "could not setup RXQ\n");
1822 err
= gmac_setup_txqs(netdev
);
1824 netdev_err(netdev
, "could not setup TXQs\n");
1825 gmac_cleanup_rxq(netdev
);
1829 napi_enable(&port
->napi
);
1831 gmac_start_dma(port
);
1832 gmac_enable_irq(netdev
, 1);
1833 gmac_enable_tx_rx(netdev
);
1834 netif_tx_start_all_queues(netdev
);
1836 hrtimer_init(&port
->rx_coalesce_timer
, CLOCK_MONOTONIC
,
1838 port
->rx_coalesce_timer
.function
= &gmac_coalesce_delay_expired
;
1840 netdev_dbg(netdev
, "opened\n");
1845 phy_stop(netdev
->phydev
);
1846 free_irq(netdev
->irq
, netdev
);
1850 static int gmac_stop(struct net_device
*netdev
)
1852 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1854 hrtimer_cancel(&port
->rx_coalesce_timer
);
1855 netif_tx_stop_all_queues(netdev
);
1856 gmac_disable_tx_rx(netdev
);
1857 gmac_stop_dma(port
);
1858 napi_disable(&port
->napi
);
1860 gmac_enable_irq(netdev
, 0);
1861 gmac_cleanup_rxq(netdev
);
1862 gmac_cleanup_txqs(netdev
);
1864 phy_stop(netdev
->phydev
);
1865 free_irq(netdev
->irq
, netdev
);
1867 gmac_update_hw_stats(netdev
);
1871 static void gmac_set_rx_mode(struct net_device
*netdev
)
1873 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1874 union gmac_rx_fltr filter
= { .bits
= {
1879 struct netdev_hw_addr
*ha
;
1880 unsigned int bit_nr
;
1886 if (netdev
->flags
& IFF_PROMISC
) {
1887 filter
.bits
.error
= 1;
1888 filter
.bits
.promiscuous
= 1;
1891 } else if (netdev
->flags
& IFF_ALLMULTI
) {
1895 netdev_for_each_mc_addr(ha
, netdev
) {
1896 bit_nr
= ~crc32_le(~0, ha
->addr
, ETH_ALEN
) & 0x3f;
1897 mc_filter
[bit_nr
>> 5] |= 1 << (bit_nr
& 0x1f);
1901 writel(mc_filter
[0], port
->gmac_base
+ GMAC_MCAST_FIL0
);
1902 writel(mc_filter
[1], port
->gmac_base
+ GMAC_MCAST_FIL1
);
1903 writel(filter
.bits32
, port
->gmac_base
+ GMAC_RX_FLTR
);
1906 static void gmac_write_mac_address(struct net_device
*netdev
)
1908 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1911 memset(addr
, 0, sizeof(addr
));
1912 memcpy(addr
, netdev
->dev_addr
, ETH_ALEN
);
1914 writel(le32_to_cpu(addr
[0]), port
->gmac_base
+ GMAC_STA_ADD0
);
1915 writel(le32_to_cpu(addr
[1]), port
->gmac_base
+ GMAC_STA_ADD1
);
1916 writel(le32_to_cpu(addr
[2]), port
->gmac_base
+ GMAC_STA_ADD2
);
1919 static int gmac_set_mac_address(struct net_device
*netdev
, void *addr
)
1921 struct sockaddr
*sa
= addr
;
1923 eth_hw_addr_set(netdev
, sa
->sa_data
);
1924 gmac_write_mac_address(netdev
);
1929 static void gmac_clear_hw_stats(struct net_device
*netdev
)
1931 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1933 readl(port
->gmac_base
+ GMAC_IN_DISCARDS
);
1934 readl(port
->gmac_base
+ GMAC_IN_ERRORS
);
1935 readl(port
->gmac_base
+ GMAC_IN_MCAST
);
1936 readl(port
->gmac_base
+ GMAC_IN_BCAST
);
1937 readl(port
->gmac_base
+ GMAC_IN_MAC1
);
1938 readl(port
->gmac_base
+ GMAC_IN_MAC2
);
1941 static void gmac_get_stats64(struct net_device
*netdev
,
1942 struct rtnl_link_stats64
*stats
)
1944 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
1947 gmac_update_hw_stats(netdev
);
1949 /* Racing with RX NAPI */
1951 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
1953 stats
->rx_packets
= port
->stats
.rx_packets
;
1954 stats
->rx_bytes
= port
->stats
.rx_bytes
;
1955 stats
->rx_errors
= port
->stats
.rx_errors
;
1956 stats
->rx_dropped
= port
->stats
.rx_dropped
;
1958 stats
->rx_length_errors
= port
->stats
.rx_length_errors
;
1959 stats
->rx_over_errors
= port
->stats
.rx_over_errors
;
1960 stats
->rx_crc_errors
= port
->stats
.rx_crc_errors
;
1961 stats
->rx_frame_errors
= port
->stats
.rx_frame_errors
;
1963 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
1965 /* Racing with MIB and TX completion interrupts */
1967 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
1969 stats
->tx_errors
= port
->stats
.tx_errors
;
1970 stats
->tx_packets
= port
->stats
.tx_packets
;
1971 stats
->tx_bytes
= port
->stats
.tx_bytes
;
1973 stats
->multicast
= port
->stats
.multicast
;
1974 stats
->rx_missed_errors
= port
->stats
.rx_missed_errors
;
1975 stats
->rx_fifo_errors
= port
->stats
.rx_fifo_errors
;
1977 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
1979 /* Racing with hard_start_xmit */
1981 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
1983 stats
->tx_dropped
= port
->stats
.tx_dropped
;
1985 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
1987 stats
->rx_dropped
+= stats
->rx_missed_errors
;
1990 static int gmac_change_mtu(struct net_device
*netdev
, int new_mtu
)
1992 int max_len
= gmac_pick_rx_max_len(new_mtu
);
1997 gmac_disable_tx_rx(netdev
);
1999 WRITE_ONCE(netdev
->mtu
, new_mtu
);
2000 gmac_update_config0_reg(netdev
, max_len
<< CONFIG0_MAXLEN_SHIFT
,
2001 CONFIG0_MAXLEN_MASK
);
2003 netdev_update_features(netdev
);
2005 gmac_enable_tx_rx(netdev
);
2010 static int gmac_set_features(struct net_device
*netdev
,
2011 netdev_features_t features
)
2013 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2014 int enable
= features
& NETIF_F_RXCSUM
;
2015 unsigned long flags
;
2018 spin_lock_irqsave(&port
->config_lock
, flags
);
2020 reg
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2021 reg
= enable
? reg
| CONFIG0_RX_CHKSUM
: reg
& ~CONFIG0_RX_CHKSUM
;
2022 writel(reg
, port
->gmac_base
+ GMAC_CONFIG0
);
2024 spin_unlock_irqrestore(&port
->config_lock
, flags
);
2028 static int gmac_get_sset_count(struct net_device
*netdev
, int sset
)
2030 return sset
== ETH_SS_STATS
? GMAC_STATS_NUM
: 0;
2033 static void gmac_get_strings(struct net_device
*netdev
, u32 stringset
, u8
*data
)
2035 if (stringset
!= ETH_SS_STATS
)
2038 memcpy(data
, gmac_stats_strings
, sizeof(gmac_stats_strings
));
2041 static void gmac_get_ethtool_stats(struct net_device
*netdev
,
2042 struct ethtool_stats
*estats
, u64
*values
)
2044 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2049 gmac_update_hw_stats(netdev
);
2051 /* Racing with MIB interrupt */
2054 start
= u64_stats_fetch_begin(&port
->ir_stats_syncp
);
2056 for (i
= 0; i
< RX_STATS_NUM
; i
++)
2057 *p
++ = port
->hw_stats
[i
];
2059 } while (u64_stats_fetch_retry(&port
->ir_stats_syncp
, start
));
2062 /* Racing with RX NAPI */
2065 start
= u64_stats_fetch_begin(&port
->rx_stats_syncp
);
2067 for (i
= 0; i
< RX_STATUS_NUM
; i
++)
2068 *p
++ = port
->rx_stats
[i
];
2069 for (i
= 0; i
< RX_CHKSUM_NUM
; i
++)
2070 *p
++ = port
->rx_csum_stats
[i
];
2071 *p
++ = port
->rx_napi_exits
;
2073 } while (u64_stats_fetch_retry(&port
->rx_stats_syncp
, start
));
2076 /* Racing with TX start_xmit */
2079 start
= u64_stats_fetch_begin(&port
->tx_stats_syncp
);
2081 for (i
= 0; i
< TX_MAX_FRAGS
; i
++) {
2082 *values
++ = port
->tx_frag_stats
[i
];
2083 port
->tx_frag_stats
[i
] = 0;
2085 *values
++ = port
->tx_frags_linearized
;
2086 *values
++ = port
->tx_hw_csummed
;
2088 } while (u64_stats_fetch_retry(&port
->tx_stats_syncp
, start
));
2091 static int gmac_get_ksettings(struct net_device
*netdev
,
2092 struct ethtool_link_ksettings
*cmd
)
2094 if (!netdev
->phydev
)
2096 phy_ethtool_ksettings_get(netdev
->phydev
, cmd
);
2101 static int gmac_set_ksettings(struct net_device
*netdev
,
2102 const struct ethtool_link_ksettings
*cmd
)
2104 if (!netdev
->phydev
)
2106 return phy_ethtool_ksettings_set(netdev
->phydev
, cmd
);
2109 static int gmac_nway_reset(struct net_device
*netdev
)
2111 if (!netdev
->phydev
)
2113 return phy_start_aneg(netdev
->phydev
);
2116 static void gmac_get_pauseparam(struct net_device
*netdev
,
2117 struct ethtool_pauseparam
*pparam
)
2119 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2120 union gmac_config0 config0
;
2122 config0
.bits32
= readl(port
->gmac_base
+ GMAC_CONFIG0
);
2124 pparam
->rx_pause
= config0
.bits
.rx_fc_en
;
2125 pparam
->tx_pause
= config0
.bits
.tx_fc_en
;
2126 pparam
->autoneg
= true;
2129 static int gmac_set_pauseparam(struct net_device
*netdev
,
2130 struct ethtool_pauseparam
*pparam
)
2132 struct phy_device
*phydev
= netdev
->phydev
;
2134 if (!pparam
->autoneg
)
2137 phy_set_asym_pause(phydev
, pparam
->rx_pause
, pparam
->tx_pause
);
2142 static void gmac_get_ringparam(struct net_device
*netdev
,
2143 struct ethtool_ringparam
*rp
,
2144 struct kernel_ethtool_ringparam
*kernel_rp
,
2145 struct netlink_ext_ack
*extack
)
2147 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2149 readl(port
->gmac_base
+ GMAC_CONFIG0
);
2151 rp
->rx_max_pending
= 1 << 15;
2152 rp
->rx_mini_max_pending
= 0;
2153 rp
->rx_jumbo_max_pending
= 0;
2154 rp
->tx_max_pending
= 1 << 15;
2156 rp
->rx_pending
= 1 << port
->rxq_order
;
2157 rp
->rx_mini_pending
= 0;
2158 rp
->rx_jumbo_pending
= 0;
2159 rp
->tx_pending
= 1 << port
->txq_order
;
2162 static int gmac_set_ringparam(struct net_device
*netdev
,
2163 struct ethtool_ringparam
*rp
,
2164 struct kernel_ethtool_ringparam
*kernel_rp
,
2165 struct netlink_ext_ack
*extack
)
2167 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2170 if (netif_running(netdev
))
2173 if (rp
->rx_pending
) {
2174 port
->rxq_order
= min(15, ilog2(rp
->rx_pending
- 1) + 1);
2175 err
= geth_resize_freeq(port
);
2177 if (rp
->tx_pending
) {
2178 port
->txq_order
= min(15, ilog2(rp
->tx_pending
- 1) + 1);
2179 port
->irq_every_tx_packets
= 1 << (port
->txq_order
- 2);
2185 static int gmac_get_coalesce(struct net_device
*netdev
,
2186 struct ethtool_coalesce
*ecmd
,
2187 struct kernel_ethtool_coalesce
*kernel_coal
,
2188 struct netlink_ext_ack
*extack
)
2190 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2192 ecmd
->rx_max_coalesced_frames
= 1;
2193 ecmd
->tx_max_coalesced_frames
= port
->irq_every_tx_packets
;
2194 ecmd
->rx_coalesce_usecs
= port
->rx_coalesce_nsecs
/ 1000;
2199 static int gmac_set_coalesce(struct net_device
*netdev
,
2200 struct ethtool_coalesce
*ecmd
,
2201 struct kernel_ethtool_coalesce
*kernel_coal
,
2202 struct netlink_ext_ack
*extack
)
2204 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2206 if (ecmd
->tx_max_coalesced_frames
< 1)
2208 if (ecmd
->tx_max_coalesced_frames
>= 1 << port
->txq_order
)
2211 port
->irq_every_tx_packets
= ecmd
->tx_max_coalesced_frames
;
2212 port
->rx_coalesce_nsecs
= ecmd
->rx_coalesce_usecs
* 1000;
2217 static u32
gmac_get_msglevel(struct net_device
*netdev
)
2219 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2221 return port
->msg_enable
;
2224 static void gmac_set_msglevel(struct net_device
*netdev
, u32 level
)
2226 struct gemini_ethernet_port
*port
= netdev_priv(netdev
);
2228 port
->msg_enable
= level
;
2231 static void gmac_get_drvinfo(struct net_device
*netdev
,
2232 struct ethtool_drvinfo
*info
)
2234 strcpy(info
->driver
, DRV_NAME
);
2235 strcpy(info
->bus_info
, netdev
->dev_id
? "1" : "0");
2238 static const struct net_device_ops gmac_351x_ops
= {
2239 .ndo_init
= gmac_init
,
2240 .ndo_open
= gmac_open
,
2241 .ndo_stop
= gmac_stop
,
2242 .ndo_start_xmit
= gmac_start_xmit
,
2243 .ndo_tx_timeout
= gmac_tx_timeout
,
2244 .ndo_set_rx_mode
= gmac_set_rx_mode
,
2245 .ndo_set_mac_address
= gmac_set_mac_address
,
2246 .ndo_get_stats64
= gmac_get_stats64
,
2247 .ndo_change_mtu
= gmac_change_mtu
,
2248 .ndo_set_features
= gmac_set_features
,
2251 static const struct ethtool_ops gmac_351x_ethtool_ops
= {
2252 .supported_coalesce_params
= ETHTOOL_COALESCE_RX_USECS
|
2253 ETHTOOL_COALESCE_MAX_FRAMES
,
2254 .get_sset_count
= gmac_get_sset_count
,
2255 .get_strings
= gmac_get_strings
,
2256 .get_ethtool_stats
= gmac_get_ethtool_stats
,
2257 .get_link
= ethtool_op_get_link
,
2258 .get_link_ksettings
= gmac_get_ksettings
,
2259 .set_link_ksettings
= gmac_set_ksettings
,
2260 .nway_reset
= gmac_nway_reset
,
2261 .get_pauseparam
= gmac_get_pauseparam
,
2262 .set_pauseparam
= gmac_set_pauseparam
,
2263 .get_ringparam
= gmac_get_ringparam
,
2264 .set_ringparam
= gmac_set_ringparam
,
2265 .get_coalesce
= gmac_get_coalesce
,
2266 .set_coalesce
= gmac_set_coalesce
,
2267 .get_msglevel
= gmac_get_msglevel
,
2268 .set_msglevel
= gmac_set_msglevel
,
2269 .get_drvinfo
= gmac_get_drvinfo
,
2272 static irqreturn_t
gemini_port_irq_thread(int irq
, void *data
)
2274 unsigned long irqmask
= SWFQ_EMPTY_INT_BIT
;
2275 struct gemini_ethernet_port
*port
= data
;
2276 struct gemini_ethernet
*geth
;
2277 unsigned long flags
;
2280 /* The queue is half empty so refill it */
2281 geth_fill_freeq(geth
, true);
2283 spin_lock_irqsave(&geth
->irq_lock
, flags
);
2284 /* ACK queue interrupt */
2285 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2286 /* Enable queue interrupt again */
2287 irqmask
|= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2288 writel(irqmask
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2289 spin_unlock_irqrestore(&geth
->irq_lock
, flags
);
2294 static irqreturn_t
gemini_port_irq(int irq
, void *data
)
2296 struct gemini_ethernet_port
*port
= data
;
2297 struct gemini_ethernet
*geth
;
2298 irqreturn_t ret
= IRQ_NONE
;
2302 spin_lock(&geth
->irq_lock
);
2304 val
= readl(geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2305 en
= readl(geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2307 if (val
& en
& SWFQ_EMPTY_INT_BIT
) {
2308 /* Disable the queue empty interrupt while we work on
2309 * processing the queue. Also disable overrun interrupts
2310 * as there is not much we can do about it here.
2312 en
&= ~(SWFQ_EMPTY_INT_BIT
| GMAC0_RX_OVERRUN_INT_BIT
2313 | GMAC1_RX_OVERRUN_INT_BIT
);
2314 writel(en
, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2315 ret
= IRQ_WAKE_THREAD
;
2318 spin_unlock(&geth
->irq_lock
);
2323 static void gemini_port_remove(struct gemini_ethernet_port
*port
)
2326 phy_disconnect(port
->netdev
->phydev
);
2327 unregister_netdev(port
->netdev
);
2329 clk_disable_unprepare(port
->pclk
);
2330 geth_cleanup_freeq(port
->geth
);
2333 static void gemini_ethernet_init(struct gemini_ethernet
*geth
)
2335 /* Only do this once both ports are online */
2336 if (geth
->initialized
)
2338 if (geth
->port0
&& geth
->port1
)
2339 geth
->initialized
= true;
2343 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_0_REG
);
2344 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_1_REG
);
2345 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_2_REG
);
2346 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_3_REG
);
2347 writel(0, geth
->base
+ GLOBAL_INTERRUPT_ENABLE_4_REG
);
2349 /* Interrupt config:
2351 * GMAC0 intr bits ------> int0 ----> eth0
2352 * GMAC1 intr bits ------> int1 ----> eth1
2353 * TOE intr -------------> int1 ----> eth1
2354 * Classification Intr --> int0 ----> eth0
2355 * Default Q0 -----------> int0 ----> eth0
2356 * Default Q1 -----------> int1 ----> eth1
2357 * FreeQ intr -----------> int1 ----> eth1
2359 writel(0xCCFC0FC0, geth
->base
+ GLOBAL_INTERRUPT_SELECT_0_REG
);
2360 writel(0x00F00002, geth
->base
+ GLOBAL_INTERRUPT_SELECT_1_REG
);
2361 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_2_REG
);
2362 writel(0xFFFFFFFF, geth
->base
+ GLOBAL_INTERRUPT_SELECT_3_REG
);
2363 writel(0xFF000003, geth
->base
+ GLOBAL_INTERRUPT_SELECT_4_REG
);
2365 /* edge-triggered interrupts packed to level-triggered one... */
2366 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_0_REG
);
2367 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_1_REG
);
2368 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_2_REG
);
2369 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_3_REG
);
2370 writel(~0, geth
->base
+ GLOBAL_INTERRUPT_STATUS_4_REG
);
2373 writel(0, geth
->base
+ GLOBAL_SW_FREEQ_BASE_SIZE_REG
);
2374 writel(0, geth
->base
+ GLOBAL_HW_FREEQ_BASE_SIZE_REG
);
2375 writel(0, geth
->base
+ GLOBAL_SWFQ_RWPTR_REG
);
2376 writel(0, geth
->base
+ GLOBAL_HWFQ_RWPTR_REG
);
2378 geth
->freeq_frag_order
= DEFAULT_RX_BUF_ORDER
;
2379 /* This makes the queue resize on probe() so that we
2380 * set up and enable the queue IRQ. FIXME: fragile.
2382 geth
->freeq_order
= 1;
2385 static void gemini_port_save_mac_addr(struct gemini_ethernet_port
*port
)
2388 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD0
));
2390 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD1
));
2392 cpu_to_le32(readl(port
->gmac_base
+ GMAC_STA_ADD2
));
2395 static int gemini_ethernet_port_probe(struct platform_device
*pdev
)
2397 char *port_names
[2] = { "ethernet0", "ethernet1" };
2398 struct device_node
*np
= pdev
->dev
.of_node
;
2399 struct gemini_ethernet_port
*port
;
2400 struct device
*dev
= &pdev
->dev
;
2401 struct gemini_ethernet
*geth
;
2402 struct net_device
*netdev
;
2403 struct device
*parent
;
2409 parent
= dev
->parent
;
2410 geth
= dev_get_drvdata(parent
);
2412 if (!strcmp(dev_name(dev
), "60008000.ethernet-port"))
2414 else if (!strcmp(dev_name(dev
), "6000c000.ethernet-port"))
2419 dev_info(dev
, "probe %s ID %d\n", dev_name(dev
), id
);
2421 netdev
= devm_alloc_etherdev_mqs(dev
, sizeof(*port
), TX_QUEUE_NUM
, TX_QUEUE_NUM
);
2423 dev_err(dev
, "Can't allocate ethernet device #%d\n", id
);
2427 port
= netdev_priv(netdev
);
2428 SET_NETDEV_DEV(netdev
, dev
);
2429 port
->netdev
= netdev
;
2433 port
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
2436 port
->dma_base
= devm_platform_get_and_ioremap_resource(pdev
, 0, NULL
);
2437 if (IS_ERR(port
->dma_base
)) {
2438 dev_err(dev
, "get DMA address failed\n");
2439 return PTR_ERR(port
->dma_base
);
2442 /* GMAC config memory */
2443 port
->gmac_base
= devm_platform_get_and_ioremap_resource(pdev
, 1, NULL
);
2444 if (IS_ERR(port
->gmac_base
)) {
2445 dev_err(dev
, "get GMAC address failed\n");
2446 return PTR_ERR(port
->gmac_base
);
2450 irq
= platform_get_irq(pdev
, 0);
2455 /* Clock the port */
2456 port
->pclk
= devm_clk_get(dev
, "PCLK");
2457 if (IS_ERR(port
->pclk
)) {
2458 dev_err(dev
, "no PCLK\n");
2459 return PTR_ERR(port
->pclk
);
2461 ret
= clk_prepare_enable(port
->pclk
);
2465 /* Maybe there is a nice ethernet address we should use */
2466 gemini_port_save_mac_addr(port
);
2468 /* Reset the port */
2469 port
->reset
= devm_reset_control_get_exclusive(dev
, NULL
);
2470 if (IS_ERR(port
->reset
)) {
2471 dev_err(dev
, "no reset\n");
2472 ret
= PTR_ERR(port
->reset
);
2475 reset_control_reset(port
->reset
);
2476 usleep_range(100, 500);
2478 /* Assign pointer in the main state container */
2484 /* This will just be done once both ports are up and reset */
2485 gemini_ethernet_init(geth
);
2487 platform_set_drvdata(pdev
, port
);
2489 /* Set up and register the netdev */
2490 netdev
->dev_id
= port
->id
;
2492 netdev
->netdev_ops
= &gmac_351x_ops
;
2493 netdev
->ethtool_ops
= &gmac_351x_ethtool_ops
;
2495 spin_lock_init(&port
->config_lock
);
2496 gmac_clear_hw_stats(netdev
);
2498 netdev
->hw_features
= GMAC_OFFLOAD_FEATURES
;
2499 netdev
->features
|= GMAC_OFFLOAD_FEATURES
| NETIF_F_GRO
;
2500 /* We can receive jumbo frames up to 10236 bytes but only
2501 * transmit 2047 bytes so, let's accept payloads of 2047
2502 * bytes minus VLAN and ethernet header
2504 netdev
->min_mtu
= ETH_MIN_MTU
;
2505 netdev
->max_mtu
= MTU_SIZE_BIT_MASK
- VLAN_ETH_HLEN
;
2507 port
->freeq_refill
= 0;
2508 netif_napi_add(netdev
, &port
->napi
, gmac_napi_poll
);
2510 ret
= of_get_mac_address(np
, mac
);
2512 dev_info(dev
, "Setting macaddr from DT %pM\n", mac
);
2513 memcpy(port
->mac_addr
, mac
, ETH_ALEN
);
2516 if (is_valid_ether_addr((void *)port
->mac_addr
)) {
2517 eth_hw_addr_set(netdev
, (u8
*)port
->mac_addr
);
2519 dev_dbg(dev
, "ethernet address 0x%08x%08x%08x invalid\n",
2520 port
->mac_addr
[0], port
->mac_addr
[1],
2522 dev_info(dev
, "using a random ethernet address\n");
2523 eth_hw_addr_random(netdev
);
2525 gmac_write_mac_address(netdev
);
2527 ret
= devm_request_threaded_irq(port
->dev
,
2530 gemini_port_irq_thread
,
2532 port_names
[port
->id
],
2537 ret
= gmac_setup_phy(netdev
);
2540 "PHY init failed\n");
2544 ret
= register_netdev(netdev
);
2551 clk_disable_unprepare(port
->pclk
);
/* Tear down one GMAC port child device; the heavy lifting lives in
 * gemini_port_remove().
 */
static void gemini_ethernet_port_remove(struct platform_device *pdev)
{
	struct gemini_ethernet_port *port = platform_get_drvdata(pdev);

	gemini_port_remove(port);
}
2562 static const struct of_device_id gemini_ethernet_port_of_match
[] = {
2564 .compatible
= "cortina,gemini-ethernet-port",
2568 MODULE_DEVICE_TABLE(of
, gemini_ethernet_port_of_match
);
2570 static struct platform_driver gemini_ethernet_port_driver
= {
2572 .name
= "gemini-ethernet-port",
2573 .of_match_table
= gemini_ethernet_port_of_match
,
2575 .probe
= gemini_ethernet_port_probe
,
2576 .remove
= gemini_ethernet_port_remove
,
2579 static int gemini_ethernet_probe(struct platform_device
*pdev
)
2581 struct device
*dev
= &pdev
->dev
;
2582 struct gemini_ethernet
*geth
;
2583 unsigned int retry
= 5;
2586 /* Global registers */
2587 geth
= devm_kzalloc(dev
, sizeof(*geth
), GFP_KERNEL
);
2590 geth
->base
= devm_platform_get_and_ioremap_resource(pdev
, 0, NULL
);
2591 if (IS_ERR(geth
->base
))
2592 return PTR_ERR(geth
->base
);
2595 /* Wait for ports to stabilize */
2598 val
= readl(geth
->base
+ GLOBAL_TOE_VERSION_REG
);
2600 } while (!val
&& --retry
);
2602 dev_err(dev
, "failed to reset ethernet\n");
2605 dev_info(dev
, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
2606 (val
>> 4) & 0xFFFU
, val
& 0xFU
);
2608 spin_lock_init(&geth
->irq_lock
);
2609 spin_lock_init(&geth
->freeq_lock
);
2611 /* The children will use this */
2612 platform_set_drvdata(pdev
, geth
);
2614 /* Spawn child devices for the two ports */
2615 return devm_of_platform_populate(dev
);
2618 static void gemini_ethernet_remove(struct platform_device
*pdev
)
2620 struct gemini_ethernet
*geth
= platform_get_drvdata(pdev
);
2622 geth_cleanup_freeq(geth
);
2623 geth
->initialized
= false;
2626 static const struct of_device_id gemini_ethernet_of_match
[] = {
2628 .compatible
= "cortina,gemini-ethernet",
2632 MODULE_DEVICE_TABLE(of
, gemini_ethernet_of_match
);
2634 static struct platform_driver gemini_ethernet_driver
= {
2637 .of_match_table
= gemini_ethernet_of_match
,
2639 .probe
= gemini_ethernet_probe
,
2640 .remove
= gemini_ethernet_remove
,
2643 static int __init
gemini_ethernet_module_init(void)
2647 ret
= platform_driver_register(&gemini_ethernet_port_driver
);
2651 ret
= platform_driver_register(&gemini_ethernet_driver
);
2653 platform_driver_unregister(&gemini_ethernet_port_driver
);
2659 module_init(gemini_ethernet_module_init
);
2661 static void __exit
gemini_ethernet_module_exit(void)
2663 platform_driver_unregister(&gemini_ethernet_driver
);
2664 platform_driver_unregister(&gemini_ethernet_port_driver
);
2666 module_exit(gemini_ethernet_module_exit
);
2668 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
2669 MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
2670 MODULE_LICENSE("GPL");
2671 MODULE_ALIAS("platform:" DRV_NAME
);