// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	pdata->resources.irq = ret;

	return 0;
}

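/* Refill up to @nbuf RX slots starting at the ring tail: allocate an skb,
 * map it for device writes, publish the buffer address in m0/m1, then hand
 * the slot back to hardware by setting the E(mpty) bit after a dma_wmb().
 */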
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		/* make the buffer address visible before granting ownership */
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

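/* Interrupt handler: the block's interrupts stay masked for the duration
 * of the NAPI run; napi_schedule_prep() guards against double-scheduling.
 */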
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

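/* A TX slot is reusable once hardware has returned it (E bit set) and the
 * completion path has stamped PKT_SIZE with the SLOT_EMPTY marker.
 */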
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

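/* Copy-based TX: the linear skb data is copied into a 64B-aligned coherent
 * bounce buffer, and that buffer, not the skb, is handed to the DMA engine.
 * The skb is kept in pkt_info only for accounting and freeing on completion.
 */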
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				     GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	/* publish buffer address and length before clearing the E bit */
	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

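/* Reap TX completions from the ring head: free the bounce buffer and skb,
 * restamp the slot as empty (E=1, PKT_SIZE=SLOT_EMPTY) and acknowledge one
 * packet in DMATXSTATUS per descriptor processed.
 */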
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

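/* NAPI RX poll: consume up to @budget descriptors owned by software (E bit
 * clear), pass each frame up through GRO, and refill the slot immediately
 * so the ring stays fully populated.
 */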
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		/* slot still owned by hardware */
		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

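/* Allocate one descriptor ring: the descriptors live in a single coherent
 * DMA block while per-slot bookkeeping (skb, dma_addr, pkt_buf) sits in
 * the separately allocated pkt_info array.
 */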
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;

err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret)
		return ret;

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

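/* NAPI poll callback: TX completions are reaped unconditionally, RX work
 * is bounded by @budget; interrupts are re-enabled only when the budget
 * was not exhausted and NAPI completes.
 */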
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

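/* TX watchdog recovery: quiesce the queue, NAPI and TX DMA, drop every
 * packet still pending in the ring, then reset the TX descriptors and
 * restart the MAC and queue.
 */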
static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

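/* Probe: allocate the netdev, discover resources (CSR region, MAC address,
 * PHY mode, IRQ), set a 64-bit DMA mask, reset the port and bring up MDIO
 * before registering with the network stack.
 */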
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	/* undo MDIO setup so the bus is not leaked on failure */
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}

static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },	/* X-Gene Ethernet v2; ID as carried upstream */
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_LICENSE("GPL");