// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1
/* CPU port MAC (PMAC) registers */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)
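
/* Per-DMA-channel state, one instance each for the TX and the RX channel.
 * The TX side tracks the queued sk_buffs per descriptor, the RX side tracks
 * the page-fragment buffers and the skb chain (skb_head/skb_tail) of a frame
 * that spans several descriptors.
 */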
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}
static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}
static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}
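
/* Frame and buffer sizing helpers: the RX DMA buffer must hold a VLAN
 * tagged Ethernet frame for the given MTU, rounded up to a multiple of the
 * DMA burst size (4 * XRX200_DMA_BURST_LEN bytes). The backing page
 * fragment additionally needs room for NET_SKB_PAD, NET_IP_ALIGN and the
 * struct skb_shared_info appended by build_skb().
 */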
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}
static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}
static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}
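
/* ndo_open: enable NAPI and the DMA channels, flush any frames the boot
 * loader may have left in the RX ring, then start the TX queue.
 */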
static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}
static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}
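
/* Allocate and DMA-map a fresh page fragment for the current RX descriptor.
 * The allocator is passed in so callers can use napi_alloc_frag() in softirq
 * context and netdev_alloc_frag() elsewhere. On failure the previous buffer
 * is kept so the descriptor always stays backed by valid memory.
 */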
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();

skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}
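
/* Take one completed descriptor off the RX ring, refill it with a new
 * buffer, wrap the old buffer in an skb via build_skb() and either hand the
 * frame to the stack (EOP) or chain it onto the pending frag_list.
 */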
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}
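
/* RX NAPI poll: receive completed descriptors until the budget is exhausted
 * or the ring runs dry, then re-enable the RX interrupt.
 */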
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}
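
/* TX completion NAPI poll: reclaim sent skbs from the TX ring, update the
 * byte queue limit accounting and wake the queue if it was stopped.
 */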
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}
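
/* ndo_start_xmit: pad the frame to the minimum Ethernet length, map it for
 * DMA and hand one SOP|EOP descriptor to the hardware. The DMA engine needs
 * the start address aligned to the burst length, so the remaining byte
 * offset is encoded in the descriptor instead.
 */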
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
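
/* ndo_change_mtu: recompute the RX buffer sizes for the new MTU. Growing the
 * MTU requires re-allocating every buffer in the RX ring, so the RX channel
 * is stopped, drained and refilled while the interface is running.
 */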
static int xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	WRITE_ONCE(net_dev->mtu, new_mtu);
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			WRITE_ONCE(net_dev->mtu, old_mtu);
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}
static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_change_mtu		= xrx200_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
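
/* Shared interrupt handler for the RX and TX DMA channels: mask the
 * channel's interrupt, schedule its NAPI instance and ack the IRQ.
 */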
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}
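
/* Set up the DMA port, allocate and pre-fill the RX descriptor ring,
 * allocate the TX ring and request both channel interrupts. On failure all
 * previously allocated resources are released again.
 */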
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}
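
/* Probe: allocate the net_device, map the PMAC registers, fetch the IRQs,
 * clock and MAC address from the device tree, initialise the DMA rings,
 * configure the PMAC header handling and register the interface.
 */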
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set the receive inter-packet gap */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}
static void xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);
}
static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");