// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ethernet driver for the WIZnet W5100 chip.
 *
 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/platform_data/wiznet.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>

#include "w5100.h"

#define DRV_NAME	"w5100"
#define DRV_VERSION	"2012-04-04"

MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
MODULE_ALIAS("platform:"DRV_NAME);
MODULE_LICENSE("GPL");

/*
 * W5100/W5200/W5500 common registers
 */
#define W5100_COMMON_REGS	0x0000
#define W5100_MR		0x0000	/* Mode Register */
#define MR_RST			0x80	/* S/W reset */
#define MR_PB			0x10	/* Ping block */
#define MR_AI			0x02	/* Address Auto-Increment */
#define MR_IND			0x01	/* Indirect mode */
#define W5100_SHAR		0x0009	/* Source MAC address */
#define W5100_IR		0x0015	/* Interrupt Register */
#define W5100_COMMON_REGS_LEN	0x0040

#define W5100_Sn_MR		0x0000	/* Sn Mode Register */
#define W5100_Sn_CR		0x0001	/* Sn Command Register */
#define W5100_Sn_IR		0x0002	/* Sn Interrupt Register */
#define W5100_Sn_SR		0x0003	/* Sn Status Register */
#define W5100_Sn_TX_FSR		0x0020	/* Sn Transmit free memory size */
#define W5100_Sn_TX_RD		0x0022	/* Sn Transmit memory read pointer */
#define W5100_Sn_TX_WR		0x0024	/* Sn Transmit memory write pointer */
#define W5100_Sn_RX_RSR		0x0026	/* Sn Receive free memory size */
#define W5100_Sn_RX_RD		0x0028	/* Sn Receive memory read pointer */

#define S0_REGS(priv)		((priv)->s0_regs)

#define W5100_S0_MR(priv)	(S0_REGS(priv) + W5100_Sn_MR)
#define S0_MR_MACRAW		0x04	/* MAC RAW mode */
#define S0_MR_MF		0x40	/* MAC Filter for W5100 and W5200 */
#define W5500_S0_MR_MF		0x80	/* MAC Filter for W5500 */
#define W5100_S0_CR(priv)	(S0_REGS(priv) + W5100_Sn_CR)
#define S0_CR_OPEN		0x01	/* OPEN command */
#define S0_CR_CLOSE		0x10	/* CLOSE command */
#define S0_CR_SEND		0x20	/* SEND command */
#define S0_CR_RECV		0x40	/* RECV command */
#define W5100_S0_IR(priv)	(S0_REGS(priv) + W5100_Sn_IR)
#define S0_IR_SENDOK		0x10	/* complete sending */
#define S0_IR_RECV		0x04	/* receiving data */
#define W5100_S0_SR(priv)	(S0_REGS(priv) + W5100_Sn_SR)
#define S0_SR_MACRAW		0x42	/* mac raw mode */
#define W5100_S0_TX_FSR(priv)	(S0_REGS(priv) + W5100_Sn_TX_FSR)
#define W5100_S0_TX_RD(priv)	(S0_REGS(priv) + W5100_Sn_TX_RD)
#define W5100_S0_TX_WR(priv)	(S0_REGS(priv) + W5100_Sn_TX_WR)
#define W5100_S0_RX_RSR(priv)	(S0_REGS(priv) + W5100_Sn_RX_RSR)
#define W5100_S0_RX_RD(priv)	(S0_REGS(priv) + W5100_Sn_RX_RD)

#define W5100_S0_REGS_LEN	0x0040

/*
 * W5100 and W5200 common registers
 */
#define W5100_IMR		0x0016	/* Interrupt Mask Register */
#define IR_S0			0x01	/* S0 interrupt */
#define W5100_RTR		0x0017	/* Retry Time-value Register */
#define RTR_DEFAULT		2000	/* =0x07d0 (2000) */

/*
 * W5100 specific register and memory
 */
#define W5100_RMSR		0x001a	/* Receive Memory Size */
#define W5100_TMSR		0x001b	/* Transmit Memory Size */

#define W5100_S0_REGS		0x0400

#define W5100_TX_MEM_START	0x4000
#define W5100_TX_MEM_SIZE	0x2000
#define W5100_RX_MEM_START	0x6000
#define W5100_RX_MEM_SIZE	0x2000

/*
 * W5200 specific register and memory
 */
#define W5200_S0_REGS		0x4000

#define W5200_Sn_RXMEM_SIZE(n)	(0x401e + (n) * 0x0100) /* Sn RX Memory Size */
#define W5200_Sn_TXMEM_SIZE(n)	(0x401f + (n) * 0x0100) /* Sn TX Memory Size */

#define W5200_TX_MEM_START	0x8000
#define W5200_TX_MEM_SIZE	0x4000
#define W5200_RX_MEM_START	0xc000
#define W5200_RX_MEM_SIZE	0x4000

/*
 * W5500 specific register and memory
 *
 * W5500 registers and memory are organized into multiple blocks. Each block
 * is selected by a 16-bit offset address plus 5 block-select bits, so both
 * are encoded into a single 32-bit address (the lower 16 bits are the offset
 * address and the upper 16 bits are the block-select bits).
 */
#define W5500_SIMR		0x0018	/* Socket Interrupt Mask Register */
#define W5500_RTR		0x0019	/* Retry Time-value Register */

#define W5500_S0_REGS		0x10000

#define W5500_Sn_RXMEM_SIZE(n)	\
		(0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
#define W5500_Sn_TXMEM_SIZE(n)	\
		(0x1001f + (n) * 0x40000) /* Sn TX Memory Size */

#define W5500_TX_MEM_START	0x20000
#define W5500_TX_MEM_SIZE	0x04000
#define W5500_RX_MEM_START	0x30000
#define W5500_RX_MEM_SIZE	0x04000

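/*
 * For example, socket 0 registers live in block-select 0x01, so their encoded
 * base address is (0x01 << 16) | 0x0000 = 0x10000 (W5500_S0_REGS); each
 * subsequent socket's blocks are 4 block-select values higher, which is the
 * 0x40000 stride used in the Sn_*MEM_SIZE macros above.
 */
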
/*
 * Device driver private data structure
 */

struct w5100_priv {
	const struct w5100_ops *ops;

	/* Socket 0 register offset address */
	u32 s0_regs;
	/* Socket 0 TX buffer offset address and size */
	u32 s0_tx_buf;
	u16 s0_tx_buf_size;
	/* Socket 0 RX buffer offset address and size */
	u32 s0_rx_buf;
	u16 s0_rx_buf_size;

	int irq;
	int link_irq;
	int link_gpio;

	struct napi_struct napi;
	struct net_device *ndev;
	bool promisc;
	u32 msg_enable;

	struct workqueue_struct *xfer_wq;
	struct work_struct rx_work;
	struct sk_buff *tx_skb;
	struct work_struct tx_work;
	struct work_struct setrx_work;
	struct work_struct restart_work;
};

/************************************************************************
 *
 *  Lowlevel I/O functions
 *
 ***********************************************************************/

struct w5100_mmio_priv {
	void __iomem *base;
	/* Serialize access in indirect address mode */
	spinlock_t reg_lock;
};

static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
{
	return w5100_ops_priv(dev);
}

static inline void __iomem *w5100_mmio(struct net_device *ndev)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);

	return mmio_priv->base;
}

/*
 * In direct address mode the host system can access W5100 registers directly
 * after mapping them into its memory-mapped I/O space.
 *
 * 0x8000 bytes of memory space are required.
 */

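/*
 * A register at offset addr is reached at (addr << CONFIG_WIZNET_BUS_SHIFT)
 * from the MMIO base. 16-bit registers are big-endian, so the 16-bit helpers
 * below transfer the high byte first.
 */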
static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
{
	return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
}

static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
				       u8 data)
{
	iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));

	return 0;
}

static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
{
	__w5100_write_direct(ndev, addr, data);

	return 0;
}

static int w5100_read16_direct(struct net_device *ndev, u32 addr)
{
	u16 data;

	data = w5100_read_direct(ndev, addr) << 8;
	data |= w5100_read_direct(ndev, addr + 1);

	return data;
}

static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
	__w5100_write_direct(ndev, addr, data >> 8);
	__w5100_write_direct(ndev, addr + 1, data);

	return 0;
}

static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
				 int len)
{
	int i;

	for (i = 0; i < len; i++, addr++)
		*buf++ = w5100_read_direct(ndev, addr);

	return 0;
}

static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
				  const u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++, addr++)
		__w5100_write_direct(ndev, addr, *buf++);

	return 0;
}

static int w5100_mmio_init(struct net_device *ndev)
{
	struct platform_device *pdev = to_platform_device(ndev->dev.parent);
	struct w5100_priv *priv = netdev_priv(ndev);
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	struct resource *mem;

	spin_lock_init(&mmio_priv->reg_lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(mmio_priv->base))
		return PTR_ERR(mmio_priv->base);

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);

	return 0;
}

static const struct w5100_ops w5100_mmio_direct_ops = {
	.chip_id = W5100,
	.read = w5100_read_direct,
	.write = w5100_write_direct,
	.read16 = w5100_read16_direct,
	.write16 = w5100_write16_direct,
	.readbulk = w5100_readbulk_direct,
	.writebulk = w5100_writebulk_direct,
	.init = w5100_mmio_init,
};

/*
 * In indirect address mode the host system accesses registers indirectly,
 * using the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
 * Data Register (IDM_DR), which are directly mapped into memory-mapped I/O
 * space. The Mode Register (MR) remains directly accessible.
 *
 * Only 0x04 bytes of memory space are required.
 */
#define W5100_IDM_AR		0x01	/* Indirect Mode Address Register */
#define W5100_IDM_DR		0x03	/* Indirect Mode Data Register */

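/*
 * Every indirect access latches the target address into IDM_AR and then moves
 * data through IDM_DR while holding reg_lock. Because w5100_reset_indirect()
 * puts the chip into address auto-increment mode (MR_AI), the bulk helpers
 * write the start address only once and then stream bytes through IDM_DR.
 */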
static int w5100_read_indirect(struct net_device *ndev, u32 addr)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;
	u8 data;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
	data = w5100_read_direct(ndev, W5100_IDM_DR);
	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return data;
}

static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
	w5100_write_direct(ndev, W5100_IDM_DR, data);
	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return 0;
}

static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;
	u16 data;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
	data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
	data |= w5100_read_direct(ndev, W5100_IDM_DR);
	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return data;
}

static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
	__w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
	w5100_write_direct(ndev, W5100_IDM_DR, data);
	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return 0;
}

static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
				   int len)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);

	for (i = 0; i < len; i++)
		*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);

	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return 0;
}

static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
				    const u8 *buf, int len)
{
	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
	w5100_write16_direct(ndev, W5100_IDM_AR, addr);

	for (i = 0; i < len; i++)
		__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);

	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);

	return 0;
}

static int w5100_reset_indirect(struct net_device *ndev)
{
	w5100_write_direct(ndev, W5100_MR, MR_RST);
	mdelay(5);
	w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);

	return 0;
}

static const struct w5100_ops w5100_mmio_indirect_ops = {
	.chip_id = W5100,
	.read = w5100_read_indirect,
	.write = w5100_write_indirect,
	.read16 = w5100_read16_indirect,
	.write16 = w5100_write16_indirect,
	.readbulk = w5100_readbulk_indirect,
	.writebulk = w5100_writebulk_indirect,
	.init = w5100_mmio_init,
	.reset = w5100_reset_indirect,
};

#if defined(CONFIG_WIZNET_BUS_DIRECT)

static int w5100_read(struct w5100_priv *priv, u32 addr)
{
	return w5100_read_direct(priv->ndev, addr);
}

static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
{
	return w5100_write_direct(priv->ndev, addr, data);
}

static int w5100_read16(struct w5100_priv *priv, u32 addr)
{
	return w5100_read16_direct(priv->ndev, addr);
}

static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
{
	return w5100_write16_direct(priv->ndev, addr, data);
}

static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
{
	return w5100_readbulk_direct(priv->ndev, addr, buf, len);
}

static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
			   int len)
{
	return w5100_writebulk_direct(priv->ndev, addr, buf, len);
}

#elif defined(CONFIG_WIZNET_BUS_INDIRECT)

static int w5100_read(struct w5100_priv *priv, u32 addr)
{
	return w5100_read_indirect(priv->ndev, addr);
}

static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
{
	return w5100_write_indirect(priv->ndev, addr, data);
}

static int w5100_read16(struct w5100_priv *priv, u32 addr)
{
	return w5100_read16_indirect(priv->ndev, addr);
}

static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
{
	return w5100_write16_indirect(priv->ndev, addr, data);
}

static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
{
	return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
}

static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
			   int len)
{
	return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
}

#else /* CONFIG_WIZNET_BUS_ANY */

static int w5100_read(struct w5100_priv *priv, u32 addr)
{
	return priv->ops->read(priv->ndev, addr);
}

static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
{
	return priv->ops->write(priv->ndev, addr, data);
}

static int w5100_read16(struct w5100_priv *priv, u32 addr)
{
	return priv->ops->read16(priv->ndev, addr);
}

static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
{
	return priv->ops->write16(priv->ndev, addr, data);
}

static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
{
	return priv->ops->readbulk(priv->ndev, addr, buf, len);
}

static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
			   int len)
{
	return priv->ops->writebulk(priv->ndev, addr, buf, len);
}

#endif

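/*
 * The socket 0 TX and RX buffers are circular. When an access starting at
 * offset would run past the end of the buffer, it is split into two bulk
 * transfers, the second one wrapping around to the start of the buffer.
 */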
static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
{
	u32 addr;
	int remain = 0;
	int ret;
	const u32 mem_start = priv->s0_rx_buf;
	const u16 mem_size = priv->s0_rx_buf_size;

	offset %= mem_size;
	addr = mem_start + offset;

	if (offset + len > mem_size) {
		remain = (offset + len) % mem_size;
		len = mem_size - offset;
	}

	ret = w5100_readbulk(priv, addr, buf, len);
	if (ret || !remain)
		return ret;

	return w5100_readbulk(priv, mem_start, buf + len, remain);
}

static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
			  int len)
{
	u32 addr;
	int ret;
	int remain = 0;
	const u32 mem_start = priv->s0_tx_buf;
	const u16 mem_size = priv->s0_tx_buf_size;

	offset %= mem_size;
	addr = mem_start + offset;

	if (offset + len > mem_size) {
		remain = (offset + len) % mem_size;
		len = mem_size - offset;
	}

	ret = w5100_writebulk(priv, addr, buf, len);
	if (ret || !remain)
		return ret;

	return w5100_writebulk(priv, mem_start, buf + len, remain);
}

static int w5100_reset(struct w5100_priv *priv)
{
	if (priv->ops->reset)
		return priv->ops->reset(priv->ndev);

	w5100_write(priv, W5100_MR, MR_RST);
	mdelay(5);
	w5100_write(priv, W5100_MR, MR_PB);

	return 0;
}

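/*
 * Issue a socket 0 command and busy-wait (for up to 100 ms) until the command
 * register reads back as zero, which signals that the chip has accepted it.
 */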
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
	unsigned long timeout;

	w5100_write(priv, W5100_S0_CR(priv), cmd);

	timeout = jiffies + msecs_to_jiffies(100);

	while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}

static void w5100_write_macaddr(struct w5100_priv *priv)
{
	struct net_device *ndev = priv->ndev;

	w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
}

static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
{
	u32 imr;

	if (priv->ops->chip_id == W5500)
		imr = W5500_SIMR;
	else
		imr = W5100_IMR;

	w5100_write(priv, imr, mask);
}

static void w5100_enable_intr(struct w5100_priv *priv)
{
	w5100_socket_intr_mask(priv, IR_S0);
}

static void w5100_disable_intr(struct w5100_priv *priv)
{
	w5100_socket_intr_mask(priv, 0);
}

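/*
 * Only socket 0 is used (in MACRAW mode), so all of the chip's internal
 * packet memory is assigned to socket 0: 8K RX + 8K TX on the W5100 and
 * 16K RX + 16K TX on the W5200/W5500, with the other sockets given none.
 */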
static void w5100_memory_configure(struct w5100_priv *priv)
{
	/* Configure 16K of internal memory
	 * as 8K RX buffer and 8K TX buffer
	 */
	w5100_write(priv, W5100_RMSR, 0x03);
	w5100_write(priv, W5100_TMSR, 0x03);
}

static void w5200_memory_configure(struct w5100_priv *priv)
{
	int i;

	/* Configure internal RX memory as 16K RX buffer and
	 * internal TX memory as 16K TX buffer
	 */
	w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
	w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);

	for (i = 1; i < 8; i++) {
		w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
		w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
	}
}

static void w5500_memory_configure(struct w5100_priv *priv)
{
	int i;

	/* Configure internal RX memory as 16K RX buffer and
	 * internal TX memory as 16K TX buffer
	 */
	w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
	w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);

	for (i = 1; i < 8; i++) {
		w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
		w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
	}
}

static int w5100_hw_reset(struct w5100_priv *priv)
{
	u32 rtr;

	w5100_reset(priv);

	w5100_disable_intr(priv);
	w5100_write_macaddr(priv);

	switch (priv->ops->chip_id) {
	case W5100:
		w5100_memory_configure(priv);
		rtr = W5100_RTR;
		break;
	case W5200:
		w5200_memory_configure(priv);
		rtr = W5100_RTR;
		break;
	case W5500:
		w5500_memory_configure(priv);
		rtr = W5500_RTR;
		break;
	default:
		return -EINVAL;
	}

	if (w5100_read16(priv, rtr) != RTR_DEFAULT)
		return -ENODEV;

	return 0;
}

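/*
 * Open socket 0 in MACRAW mode. Unless the interface is in promiscuous mode,
 * the chip's MAC filter bit is also set so that the hardware filters out
 * frames not meant for this interface.
 */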
static void w5100_hw_start(struct w5100_priv *priv)
{
	u8 mode = S0_MR_MACRAW;

	if (!priv->promisc) {
		if (priv->ops->chip_id == W5500)
			mode |= W5500_S0_MR_MF;
		else
			mode |= S0_MR_MF;
	}

	w5100_write(priv, W5100_S0_MR(priv), mode);
	w5100_command(priv, S0_CR_OPEN);
	w5100_enable_intr(priv);
}

static void w5100_hw_close(struct w5100_priv *priv)
{
	w5100_disable_intr(priv);
	w5100_command(priv, S0_CR_CLOSE);
}

/***********************************************************************
 *
 *  Device driver functions / callbacks
 *
 ***********************************************************************/

static void w5100_get_drvinfo(struct net_device *ndev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}

static u32 w5100_get_link(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	if (gpio_is_valid(priv->link_gpio))
		return !!gpio_get_value(priv->link_gpio);

	return 1;
}

static u32 w5100_get_msglevel(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void w5100_set_msglevel(struct net_device *ndev, u32 value)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static int w5100_get_regs_len(struct net_device *ndev)
{
	return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
}

static void w5100_get_regs(struct net_device *ndev,
			   struct ethtool_regs *regs, void *buf)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	regs->version = 1;
	w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
	buf += W5100_COMMON_REGS_LEN;
	w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
}

static void w5100_restart(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	w5100_hw_reset(priv);
	w5100_hw_start(priv);
	ndev->stats.tx_errors++;
	netif_trans_update(ndev);
	netif_wake_queue(ndev);
}

static void w5100_restart_work(struct work_struct *work)
{
	struct w5100_priv *priv = container_of(work, struct w5100_priv,
					       restart_work);

	w5100_restart(priv->ndev);
}

static void w5100_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	if (priv->ops->may_sleep)
		schedule_work(&priv->restart_work);
	else
		w5100_restart(ndev);
}

static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	u16 offset;

	offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
	w5100_writebuf(priv, offset, skb->data, skb->len);
	w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;
	dev_kfree_skb(skb);

	w5100_command(priv, S0_CR_SEND);
}

static void w5100_tx_work(struct work_struct *work)
{
	struct w5100_priv *priv = container_of(work, struct w5100_priv,
					       tx_work);
	struct sk_buff *skb = priv->tx_skb;

	priv->tx_skb = NULL;

	if (WARN_ON(!skb))
		return;
	w5100_tx_skb(priv->ndev, skb);
}

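/*
 * Transmit path: the queue is stopped for every frame. Bus drivers that may
 * sleep (e.g. SPI) hand the skb off to the xfer workqueue, the others write
 * it out synchronously. The queue is woken again from the SENDOK interrupt.
 */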
static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);

	if (priv->ops->may_sleep) {
		WARN_ON(priv->tx_skb);
		priv->tx_skb = skb;
		queue_work(priv->xfer_wq, &priv->tx_work);
	} else {
		w5100_tx_skb(ndev, skb);
	}

	return NETDEV_TX_OK;
}

static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 rx_len;
	u16 offset;
	u8 header[2];
	u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));

	if (rx_buf_len == 0)
		return NULL;

	offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
	w5100_readbuf(priv, offset, header, 2);
	rx_len = get_unaligned_be16(header) - 2;

	skb = netdev_alloc_skb_ip_align(ndev, rx_len);
	if (unlikely(!skb)) {
		w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
		w5100_command(priv, S0_CR_RECV);
		ndev->stats.rx_dropped++;
		return NULL;
	}

	skb_put(skb, rx_len);
	w5100_readbuf(priv, offset + 2, skb->data, rx_len);
	w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
	w5100_command(priv, S0_CR_RECV);
	skb->protocol = eth_type_trans(skb, ndev);

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += rx_len;

	return skb;
}

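/*
 * Receive path: sleeping bus drivers drain the RX buffer from the xfer
 * workqueue and push frames with netif_rx_ni(); the others use NAPI polling
 * and netif_receive_skb(). In both cases the socket interrupt is re-enabled
 * once the buffer is drained (or the NAPI budget was not exhausted).
 */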
static void w5100_rx_work(struct work_struct *work)
{
	struct w5100_priv *priv = container_of(work, struct w5100_priv,
					       rx_work);
	struct sk_buff *skb;

	while ((skb = w5100_rx_skb(priv->ndev)))
		netif_rx_ni(skb);

	w5100_enable_intr(priv);
}

static int w5100_napi_poll(struct napi_struct *napi, int budget)
{
	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
	int rx_count;

	for (rx_count = 0; rx_count < budget; rx_count++) {
		struct sk_buff *skb = w5100_rx_skb(priv->ndev);

		if (skb)
			netif_receive_skb(skb);
		else
			break;
	}

	if (rx_count < budget) {
		napi_complete_done(napi, rx_count);
		w5100_enable_intr(priv);
	}

	return rx_count;
}

static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	int ir = w5100_read(priv, W5100_S0_IR(priv));
	if (!ir)
		return IRQ_NONE;
	w5100_write(priv, W5100_S0_IR(priv), ir);

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		w5100_disable_intr(priv);

		if (priv->ops->may_sleep)
			queue_work(priv->xfer_wq, &priv->rx_work);
		else if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		if (gpio_get_value(priv->link_gpio) != 0) {
			netif_info(priv, link, ndev, "link is up\n");
			netif_carrier_on(ndev);
		} else {
			netif_info(priv, link, ndev, "link is down\n");
			netif_carrier_off(ndev);
		}
	}

	return IRQ_HANDLED;
}

static void w5100_setrx_work(struct work_struct *work)
{
	struct w5100_priv *priv = container_of(work, struct w5100_priv,
					       setrx_work);

	w5100_hw_start(priv);
}

static void w5100_set_rx_mode(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;

	if (priv->promisc != set_promisc) {
		priv->promisc = set_promisc;

		if (priv->ops->may_sleep)
			schedule_work(&priv->setrx_work);
		else
			w5100_hw_start(priv);
	}
}

static int w5100_set_macaddr(struct net_device *ndev, void *addr)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	struct sockaddr *sock_addr = addr;

	if (!is_valid_ether_addr(sock_addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
	w5100_write_macaddr(priv);
	return 0;
}

static int w5100_open(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	w5100_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}

static int w5100_stop(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5100_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}

static const struct ethtool_ops w5100_ethtool_ops = {
	.get_drvinfo		= w5100_get_drvinfo,
	.get_msglevel		= w5100_get_msglevel,
	.set_msglevel		= w5100_set_msglevel,
	.get_link		= w5100_get_link,
	.get_regs_len		= w5100_get_regs_len,
	.get_regs		= w5100_get_regs,
};

static const struct net_device_ops w5100_netdev_ops = {
	.ndo_open		= w5100_open,
	.ndo_stop		= w5100_stop,
	.ndo_start_xmit		= w5100_start_tx,
	.ndo_tx_timeout		= w5100_tx_timeout,
	.ndo_set_rx_mode	= w5100_set_rx_mode,
	.ndo_set_mac_address	= w5100_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int w5100_mmio_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
	const void *mac_addr = NULL;
	struct resource *mem;
	const struct w5100_ops *ops;
	int irq;

	if (data && is_valid_ether_addr(data->mac_addr))
		mac_addr = data->mac_addr;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
		ops = &w5100_mmio_indirect_ops;
	else
		ops = &w5100_mmio_direct_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
			   mac_addr, irq, data ? data->link_gpio : -EINVAL);
}

static int w5100_mmio_remove(struct platform_device *pdev)
{
	return w5100_remove(&pdev->dev);
}

void *w5100_ops_priv(const struct net_device *ndev)
{
	return netdev_priv(ndev) +
	       ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
}
EXPORT_SYMBOL_GPL(w5100_ops_priv);

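/*
 * The bus-specific ("ops") private area is carved out of the same
 * netdev_priv() allocation, placed behind struct w5100_priv and aligned to
 * NETDEV_ALIGN; w5100_probe() below sizes the allocation accordingly.
 */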
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
		int sizeof_ops_priv, const void *mac_addr, int irq,
		int link_gpio)
{
	struct w5100_priv *priv;
	struct net_device *ndev;
	int err;
	size_t alloc_size;

	alloc_size = sizeof(*priv);
	if (sizeof_ops_priv) {
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_ops_priv;
	}
	alloc_size += NETDEV_ALIGN - 1;

	ndev = alloc_etherdev(alloc_size);
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, dev);
	dev_set_drvdata(dev, ndev);
	priv = netdev_priv(ndev);

	switch (ops->chip_id) {
	case W5100:
		priv->s0_regs = W5100_S0_REGS;
		priv->s0_tx_buf = W5100_TX_MEM_START;
		priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
		priv->s0_rx_buf = W5100_RX_MEM_START;
		priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
		break;
	case W5200:
		priv->s0_regs = W5200_S0_REGS;
		priv->s0_tx_buf = W5200_TX_MEM_START;
		priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
		priv->s0_rx_buf = W5200_RX_MEM_START;
		priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
		break;
	case W5500:
		priv->s0_regs = W5500_S0_REGS;
		priv->s0_tx_buf = W5500_TX_MEM_START;
		priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
		priv->s0_rx_buf = W5500_RX_MEM_START;
		priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
		break;
	default:
		err = -EINVAL;
		goto err_register;
	}

	priv->ndev = ndev;
	priv->ops = ops;
	priv->irq = irq;
	priv->link_gpio = link_gpio;

	ndev->netdev_ops = &w5100_netdev_ops;
	ndev->ethtool_ops = &w5100_ethtool_ops;
	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					netdev_name(ndev));
	if (!priv->xfer_wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	INIT_WORK(&priv->rx_work, w5100_rx_work);
	INIT_WORK(&priv->tx_work, w5100_tx_work);
	INIT_WORK(&priv->setrx_work, w5100_setrx_work);
	INIT_WORK(&priv->restart_work, w5100_restart_work);

	if (!IS_ERR_OR_NULL(mac_addr))
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);

	if (priv->ops->init) {
		err = priv->ops->init(priv->ndev);
		if (err)
			goto err_hw;
	}

	err = w5100_hw_reset(priv);
	if (err)
		goto err_hw;

	if (ops->may_sleep) {
		err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					   netdev_name(ndev), ndev);
	} else {
		err = request_irq(priv->irq, w5100_interrupt,
				  IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
	}
	if (err)
		goto err_hw;

	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);

		if (!link_name) {
			err = -ENOMEM;
			goto err_gpio;
		}
		snprintf(link_name, 16, "%s-link", netdev_name(ndev));
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
					    IRQF_TRIGGER_RISING |
					    IRQF_TRIGGER_FALLING,
					    link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	return 0;

err_gpio:
	free_irq(priv->irq, ndev);
err_hw:
	destroy_workqueue(priv->xfer_wq);
err_wq:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	return err;
}
EXPORT_SYMBOL_GPL(w5100_probe);

int w5100_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct w5100_priv *priv = netdev_priv(ndev);

	w5100_hw_reset(priv);
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	flush_work(&priv->setrx_work);
	flush_work(&priv->restart_work);
	destroy_workqueue(priv->xfer_wq);

	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}
EXPORT_SYMBOL_GPL(w5100_remove);

#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5100_hw_close(priv);
	}
	return 0;
}

static int w5100_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		w5100_hw_reset(priv);
		w5100_hw_start(priv);

		netif_device_attach(ndev);
		if (!gpio_is_valid(priv->link_gpio) ||
		    gpio_get_value(priv->link_gpio) != 0)
			netif_carrier_on(ndev);
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
EXPORT_SYMBOL_GPL(w5100_pm_ops);

static struct platform_driver w5100_mmio_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.pm	= &w5100_pm_ops,
	},
	.probe		= w5100_mmio_probe,
	.remove		= w5100_mmio_remove,
};

module_platform_driver(w5100_mmio_driver);