/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

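/* Fetch the ENET CSR region, MAC address, phy-connection-type and IRQ
 * from the platform device; only RGMII is accepted as the PHY mode.
 */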
static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "Unable to get irq\n");
		return ret;
	}
	pdata->resources.irq = ret;

	return 0;
}

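/* Post @nbuf empty Rx buffers at the ring tail: allocate an skb,
 * DMA-map it, and publish the descriptor with the empty (E) bit set.
 */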
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

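/* Interrupt handler: mask the device interrupt and hand further
 * Tx/Rx processing off to NAPI.
 */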
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

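/* Transmit path: copy the skb head into a freshly allocated coherent
 * buffer (the hardware wants 64B-aligned packet buffers), fill in the
 * descriptor, then kick the Tx DMA engine.
 */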
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				      GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

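/* Reclaim Tx completions: walk the ring from head, free the skb and
 * its coherent buffer for each descriptor the hardware has finished,
 * and mark the slot empty again.
 */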
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

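/* Receive path: consume up to @budget filled descriptors from the Rx
 * ring, hand good frames to GRO, count errors, and refill one buffer
 * per processed slot.
 */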
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

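/* Allocate one descriptor ring: the ring struct, a coherent DMA region
 * for the descriptors themselves, and the per-slot bookkeeping array.
 */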
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
					      GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

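/* Create the Tx and Rx rings, program their base addresses into the
 * hardware, and pre-fill the Rx ring with buffers.
 */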
static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret)
		return ret;

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

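/* NAPI poll: reclaim Tx completions first, then process up to @budget
 * Rx packets; re-arm the interrupt once the ring is drained.
 */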
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

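/* Drop any Tx packets the hardware never completed, freeing both the
 * skb and the coherent packet buffer for each pending descriptor.
 */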
static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

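/* Tx watchdog: quiesce the Tx DMA engine, reap or drop everything in
 * flight, reset the Tx ring and MAC, then restart the queue.
 */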
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

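/* Probe: allocate the net_device, discover platform resources, set the
 * DMA mask, initialize the hardware and MDIO bus, then register with
 * the networking core.
 */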
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	return 0;

err:
	free_netdev(ndev);

	return ret;
}

static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		.name = "xgene-enet-v2",
		.acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");