/* MOXA ART Ethernet (RTL8201CP) driver.
 *
 * Copyright (C) 2013 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * Based on code from
 * Moxa Technology Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include "moxart_ether.h"
static inline void moxart_emac_write(struct net_device *ndev,
				     unsigned int reg, unsigned long value)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(value, priv->base + reg);
}
static void moxart_update_mac_address(struct net_device *ndev)
{
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
			  ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
			  ((ndev->dev_addr[2] << 24) |
			   (ndev->dev_addr[3] << 16) |
			   (ndev->dev_addr[4] << 8) |
			   (ndev->dev_addr[5])));
}
static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
	moxart_update_mac_address(ndev);

	return 0;
}
static void moxart_mac_free_memory(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
				 priv->rx_buf_size, DMA_FROM_DEVICE);

	if (priv->tx_desc_base)
		dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
				  priv->tx_desc_base, priv->tx_base);

	if (priv->rx_desc_base)
		dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
				  priv->rx_desc_base, priv->rx_base);

	kfree(priv->tx_buf_base);
	kfree(priv->rx_buf_base);
}
static void moxart_mac_reset(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(SW_RST, priv->base + REG_MAC_CTRL);
	while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
		mdelay(10);

	writel(0, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
}
static void moxart_mac_enable(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
	writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
	writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);

	priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
}
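
/*
 * Descriptor ring layout: TX and RX descriptors live in coherent DMA
 * memory and each slot points at a fixed offset inside one contiguous
 * buffer area.  RX descriptors start out owned by the DMA engine
 * (RX_DESC0_DMA_OWN); the last descriptor of each ring carries the END
 * bit so the controller wraps back to the ring base address.
 */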
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void __iomem *desc;
	int i;

	for (i = 0; i < TX_DESC_NUM; i++) {
		desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
		memset(desc, 0, TX_REG_DESC_SIZE);

		priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
	}
	writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);

	priv->tx_head = 0;
	priv->tx_tail = 0;

	for (i = 0; i < RX_DESC_NUM; i++) {
		desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
		memset(desc, 0, RX_REG_DESC_SIZE);
		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
		writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
		       desc + RX_REG_OFFSET_DESC1);

		priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
		priv->rx_mapping[i] = dma_map_single(&ndev->dev,
						     priv->rx_buf[i],
						     priv->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
			netdev_err(ndev, "DMA mapping error\n");

		writel(priv->rx_mapping[i],
		       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
		writel(priv->rx_buf[i],
		       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
	}
	writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);

	priv->rx_head = 0;

	/* reset the MAC controller TX/RX descriptor base address */
	writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
	writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
}
static int moxart_mac_open(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&priv->napi);

	moxart_mac_reset(ndev);
	moxart_update_mac_address(ndev);
	moxart_mac_setup_desc_ring(ndev);
	moxart_mac_enable(ndev);
	netif_start_queue(ndev);

	netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
		   __func__, readl(priv->base + REG_INTERRUPT_MASK),
		   readl(priv->base + REG_MAC_CTRL));

	return 0;
}
static int moxart_mac_stop(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	netif_stop_queue(ndev);

	/* disable all interrupts */
	writel(0, priv->base + REG_INTERRUPT_MASK);

	/* disable all functions */
	writel(0, priv->base + REG_MAC_CTRL);

	return 0;
}
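
/*
 * NAPI poll: consume RX descriptors until the budget is exhausted or a
 * descriptor still owned by the DMA engine is reached, handing each
 * processed slot back (RX_DESC0_DMA_OWN) and finally unmasking the RX
 * interrupt again.
 */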
static int moxart_rx_poll(struct napi_struct *napi, int budget)
{
	struct moxart_mac_priv_t *priv = container_of(napi,
						      struct moxart_mac_priv_t,
						      napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	void __iomem *desc;
	unsigned int desc0, len;
	int rx_head = priv->rx_head;
	int rx = 0;

	while (rx < budget) {
		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
		desc0 = readl(desc + RX_REG_OFFSET_DESC0);

		if (desc0 & RX_DESC0_DMA_OWN)
			break;

		if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
			     RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
			net_dbg_ratelimited("packet error\n");
			priv->stats.rx_dropped++;
			priv->stats.rx_errors++;
			goto rx_next;
		}

		len = desc0 & RX_DESC0_FRAME_LEN_MASK;

		if (len > RX_BUF_SIZE)
			len = RX_BUF_SIZE;

		dma_sync_single_for_cpu(&ndev->dev,
					priv->rx_mapping[rx_head],
					priv->rx_buf_size, DMA_FROM_DEVICE);
		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
			priv->stats.rx_dropped++;
			priv->stats.rx_errors++;
			goto rx_next;
		}

		memcpy(skb->data, priv->rx_buf[rx_head], len);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);
		rx++;

		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		if (desc0 & RX_DESC0_MULTICAST)
			priv->stats.multicast++;

rx_next:
		/* return the descriptor to the DMA engine and advance */
		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);

		rx_head = RX_NEXT(rx_head);
		priv->rx_head = rx_head;
	}

	if (rx < budget)
		napi_complete(napi);

	priv->reg_imr |= RPKT_FINISH_M;
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	return rx;
}
static void moxart_tx_finished(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned tx_head = priv->tx_head;
	unsigned tx_tail = priv->tx_tail;

	while (tx_tail != tx_head) {
		dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
				 priv->tx_len[tx_tail], DMA_TO_DEVICE);

		priv->stats.tx_packets++;
		priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;

		dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;

		tx_tail = TX_NEXT(tx_tail);
	}
	priv->tx_tail = tx_tail;
}
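
/*
 * Interrupt top half: TX completions are reaped directly, while RX work
 * is deferred to NAPI with the RX interrupt masked until the poll
 * routine re-enables it.
 */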
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);

	if (ists & XPKT_OK_INT_STS)
		moxart_tx_finished(ndev);

	if (ists & RPKT_FINISH) {
		if (napi_schedule_prep(&priv->napi)) {
			priv->reg_imr &= ~RPKT_FINISH_M;
			writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
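
/*
 * Transmit path: the skb data is DMA-mapped in place, its address is
 * written into the next free TX descriptor, ownership is handed to the
 * DMA engine and transmission is kicked via the TX poll-demand register.
 * Short frames are zero-padded to ETH_ZLEN before being handed off.
 */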
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void __iomem *desc;
	unsigned int len;
	unsigned int tx_head = priv->tx_head;
	u32 txdes1;
	int ret = NETDEV_TX_BUSY;

	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);

	spin_lock_irq(&priv->txlock);
	if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
		net_dbg_ratelimited("no TX space for packet\n");
		priv->stats.tx_dropped++;
		goto out_unlock;
	}

	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;

	priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
						   len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
		netdev_err(ndev, "DMA mapping error\n");
		goto out_unlock;
	}

	priv->tx_len[tx_head] = len;
	priv->tx_skb[tx_head] = skb;

	writel(priv->tx_mapping[tx_head],
	       desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
	writel(skb->data,
	       desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);

	if (skb->len < ETH_ZLEN) {
		memset(&skb->data[skb->len],
		       0, ETH_ZLEN - skb->len);
		len = ETH_ZLEN;
	}

	dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
				   priv->tx_buf_size, DMA_TO_DEVICE);

	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
	if (tx_head == TX_DESC_NUM_MASK)
		txdes1 |= TX_DESC1_END;
	writel(txdes1, desc + TX_REG_OFFSET_DESC1);
	writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);

	/* start to send packet */
	writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);

	priv->tx_head = TX_NEXT(tx_head);

	ndev->trans_start = jiffies;
	ret = NETDEV_TX_OK;
out_unlock:
	spin_unlock_irq(&priv->txlock);

	return ret;
}
static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	return &priv->stats;
}
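
/*
 * Multicast filtering uses a 64-bit hash: the top six bits of the
 * little-endian CRC-32 of each address select one bit across the two
 * 32-bit hash-table registers.
 */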
static void moxart_mac_setmulticast(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int crc_val;

	netdev_for_each_mc_addr(ha, ndev) {
		crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
		crc_val = (crc_val >> 26) & 0x3f;
		if (crc_val >= 32)
			writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
			       (1UL << (crc_val - 32)),
			       priv->base + REG_MCAST_HASH_TABLE1);
		else
			writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
			       (1UL << crc_val),
			       priv->base + REG_MCAST_HASH_TABLE0);
	}
}
static void moxart_mac_set_rx_mode(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	spin_lock_irq(&priv->txlock);

	(ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
				      (priv->reg_maccr &= ~RCV_ALL);

	(ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
				       (priv->reg_maccr &= ~RX_MULTIPKT);

	if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
		priv->reg_maccr |= HT_MULTI_EN;
		moxart_mac_setmulticast(ndev);
	} else {
		priv->reg_maccr &= ~HT_MULTI_EN;
	}

	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);

	spin_unlock_irq(&priv->txlock);
}
static struct net_device_ops moxart_netdev_ops = {
	.ndo_open		= moxart_mac_open,
	.ndo_stop		= moxart_mac_stop,
	.ndo_start_xmit		= moxart_mac_start_xmit,
	.ndo_get_stats		= moxart_mac_get_stats,
	.ndo_set_rx_mode	= moxart_mac_set_rx_mode,
	.ndo_set_mac_address	= moxart_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static int moxart_mac_probe(struct platform_device *pdev)
{
	struct device *p_dev = &pdev->dev;
	struct device_node *node = p_dev->of_node;
	struct net_device *ndev;
	struct moxart_mac_priv_t *priv;
	struct resource *res;
	unsigned int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
	if (!ndev)
		return -ENOMEM;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		netdev_err(ndev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto irq_map_fail;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ndev->base_addr = res->start;
	priv->base = devm_ioremap_resource(p_dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(p_dev, "devm_ioremap_resource failed\n");
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

	spin_lock_init(&priv->txlock);

	priv->tx_buf_size = TX_BUF_SIZE;
	priv->rx_buf_size = RX_BUF_SIZE;

	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
						TX_DESC_NUM, &priv->tx_base,
						GFP_DMA | GFP_KERNEL);
	if (priv->tx_desc_base == NULL) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
						RX_DESC_NUM, &priv->rx_base,
						GFP_DMA | GFP_KERNEL);
	if (priv->rx_desc_base == NULL) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
				    GFP_KERNEL);
	if (!priv->tx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
				    GFP_KERNEL);
	if (!priv->rx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	platform_set_drvdata(pdev, ndev);

	ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
			       pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	ndev->netdev_ops = &moxart_netdev_ops;
	netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto init_fail;

	netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
		   __func__, ndev->irq, ndev->dev_addr);

	return 0;

init_fail:
	netdev_err(ndev, "init failed\n");
	moxart_mac_free_memory(ndev);
irq_map_fail:
	free_netdev(ndev);
	return ret;
}
static int moxart_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	moxart_mac_free_memory(ndev);
	free_netdev(ndev);

	return 0;
}
static const struct of_device_id moxart_mac_match[] = {
	{ .compatible = "moxa,moxart-mac" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
	.probe	= moxart_mac_probe,
	.remove	= moxart_remove,
	.driver	= {
		.name		= "moxart-ethernet",
		.of_match_table	= moxart_mac_match,
	},
};
module_platform_driver(moxart_mac_driver);
MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");