// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>
#define XRX200_DMA_DATA_LEN	0x600
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf
#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)
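/* For reference: the probe path below ORs PMAC_HD_CTL_RST | PMAC_HD_CTL_AST |
 * PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC into
 * PMAC_HD_CTL, i.e. it sets 0x1dc: CRC insertion/stripping in hardware plus a
 * status header and special tag on both directions of the CPU port.
 */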
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];

	struct xrx200_priv *priv;
};
struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}
static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}
static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}
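/* Read-modify-write helper for the PMAC registers; e.g. the probe call
 * xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG) below clears the
 * low IPG nibble and then sets it to 0xb.
 */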
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		/* hand the descriptor straight back to the hardware without
		 * delivering the received frame
		 */
		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    XRX200_DMA_DATA_LEN;

		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}
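/* Descriptor ownership protocol, as used above and in the poll loops below:
 * the CPU sets LTQ_DMA_OWN to pass a descriptor to the DMA engine, and the
 * engine sets LTQ_DMA_C (complete) when it has finished with it. A ctl word
 * that reads as LTQ_DMA_C with OWN cleared therefore holds a finished
 * transfer that the CPU may reclaim.
 */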
static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}
static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	int ret = 0;

	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
							  XRX200_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
			ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
			DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ch->priv->dev,
				       ch->dma.desc_base[ch->dma.desc].addr))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ret = -ENOMEM;
		goto skip;
	}

skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		XRX200_DMA_DATA_LEN;

	return ret;
}
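/* Note that the ctl word is rewritten even on the skip (error) path, so the
 * descriptor is handed back to the hardware with its previous buffer mapping
 * and the slot stays usable; the caller decides what to do about the frame.
 */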
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	int ret;

	ret = xrx200_alloc_skb(ch);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_receive_skb(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += len - ETH_FCS_LEN;

	return 0;
}
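/* A replacement RX buffer is allocated before the received skb is handed up
 * the stack: the descriptor index is advanced in either case, but the frame
 * is only delivered when the refill succeeded.
 */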
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret)
				return ret;

			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		napi_complete(&ch->napi);
		ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}
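/* Standard NAPI contract: when fewer packets than the budget were processed,
 * the ring is empty, so polling stops (napi_complete()) and the channel
 * interrupt is re-enabled; otherwise the core keeps calling the poll function
 * with the interrupt still masked.
 */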
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	if (pkts < budget) {
		napi_complete(&ch->napi);
		ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}
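/* The TX ring keeps two cursors: ch->dma.desc is where xrx200_start_xmit()
 * queues new frames and ch->tx_free is the oldest still-unreclaimed slot,
 * advanced here as completed descriptors are recycled. start_xmit() treats
 * the ring as full when its cursor catches up with tx_free.
 */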
static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a 16 byte aligned address */
	byte_offset = mapping % 16;

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
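/* DMA transfers must start on a 16-byte boundary, so the mapped address is
 * rounded down to the previous boundary and the remainder is passed to the
 * hardware via LTQ_DMA_TX_OFFSET(), telling it how many leading bytes to
 * skip. The wmb() ensures the descriptor address is visible to the device
 * before ownership is transferred through the ctl word.
 */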
static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	ltq_dma_disable_irq(&ch->dma);
	ltq_dma_ack_irq(&ch->dma);

	napi_schedule(&ch->napi);

	return IRQ_HANDLED;
}
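/* Classic interrupt mitigation: the handler masks and acknowledges the
 * channel interrupt and defers all real work to NAPI; the matching
 * ltq_dma_enable_irq() calls sit at the end of the two poll functions.
 */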
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_skb(ch_rx);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			dev_kfree_skb_any(priv->chan_rx.skb[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}
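/* The error labels above unwind in reverse order of setup and fall through
 * into each other: a TX IRQ failure frees the TX channel, then the RX ring
 * skbs, then the RX channel, so each goto target releases only what had
 * already been allocated at that point.
 */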
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		dev_kfree_skb_any(priv->chan_rx.skb[i]);
}
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct resource *res;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	const u8 *mac;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN;

	/* load the memory ranges */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get resources\n");
		return -ENOENT;
	}

	priv->pmac_reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->pmac_reg)) {
		dev_err(dev, "failed to request and remap io ranges\n");
		return PTR_ERR(priv->pmac_reg);
	}

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	mac = of_get_mac_address(np);
	if (mac && is_valid_ether_addr(mac))
		ether_addr_copy(net_dev->dev_addr, mac);
	else
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
	netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}
static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}
static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
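/* A minimal sketch of a matching device tree node; the register range and
 * interrupt numbers are illustrative only, not taken from a real board:
 *
 *	ethernet@e10b308 {
 *		compatible = "lantiq,xrx200-net";
 *		reg = <0xe10b308 0xcf8>;
 *		interrupts = <73>, <72>;
 *		interrupt-names = "rx", "tx";
 *	};
 *
 * The "rx"/"tx" interrupt-names and the single reg range are what
 * xrx200_probe() looks up above.
 */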
static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");