/*
 * drivers/net/ethernet/nxp/lpc_eth.c
 *
 * Author: Kevin Wells <kevin.wells@nxp.com>
 *
 * Copyright (C) 2010 NXP Semiconductors
 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/clk.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/types.h>

#include <linux/io.h>
#include <mach/board.h>
#include <mach/platform.h>
#include <mach/hardware.h>

#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"

#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
#define ENET_TX_DESC 16

#define NAPI_WEIGHT 16

/*
 * Ethernet MAC controller Register offsets
 */
#define LPC_ENET_MAC1(x) (x + 0x000)
#define LPC_ENET_MAC2(x) (x + 0x004)
#define LPC_ENET_IPGT(x) (x + 0x008)
#define LPC_ENET_IPGR(x) (x + 0x00C)
#define LPC_ENET_CLRT(x) (x + 0x010)
#define LPC_ENET_MAXF(x) (x + 0x014)
#define LPC_ENET_SUPP(x) (x + 0x018)
#define LPC_ENET_TEST(x) (x + 0x01C)
#define LPC_ENET_MCFG(x) (x + 0x020)
#define LPC_ENET_MCMD(x) (x + 0x024)
#define LPC_ENET_MADR(x) (x + 0x028)
#define LPC_ENET_MWTD(x) (x + 0x02C)
#define LPC_ENET_MRDD(x) (x + 0x030)
#define LPC_ENET_MIND(x) (x + 0x034)
#define LPC_ENET_SA0(x) (x + 0x040)
#define LPC_ENET_SA1(x) (x + 0x044)
#define LPC_ENET_SA2(x) (x + 0x048)
#define LPC_ENET_COMMAND(x) (x + 0x100)
#define LPC_ENET_STATUS(x) (x + 0x104)
#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
#define LPC_ENET_TXSTATUS(x) (x + 0x120)
#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
#define LPC_ENET_TSV0(x) (x + 0x158)
#define LPC_ENET_TSV1(x) (x + 0x15C)
#define LPC_ENET_RSV(x) (x + 0x160)
#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
#define LPC_ENET_INTSET(x) (x + 0xFEC)
#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)

/*
 * mac1 register definitions
 */
#define LPC_MAC1_RECV_ENABLE (1 << 0)
#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
#define LPC_MAC1_LOOPBACK (1 << 4)
#define LPC_MAC1_RESET_TX (1 << 8)
#define LPC_MAC1_RESET_MCS_TX (1 << 9)
#define LPC_MAC1_RESET_RX (1 << 10)
#define LPC_MAC1_RESET_MCS_RX (1 << 11)
#define LPC_MAC1_SIMULATION_RESET (1 << 14)
#define LPC_MAC1_SOFT_RESET (1 << 15)

/*
 * mac2 register definitions
 */
#define LPC_MAC2_FULL_DUPLEX (1 << 0)
#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
#define LPC_MAC2_DELAYED_CRC (1 << 3)
#define LPC_MAC2_CRC_ENABLE (1 << 4)
#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
#define LPC_MAC2_NO_BACKOFF (1 << 12)
#define LPC_MAC2_BACK_PRESSURE (1 << 13)
#define LPC_MAC2_EXCESS_DEFER (1 << 14)

/*
 * ipgt register definitions
 */
#define LPC_IPGT_LOAD(n) ((n) & 0x7F)

/*
 * ipgr register definitions
 */
#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)

/*
 * clrt register definitions
 */
#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)

/*
 * maxf register definitions
 */
#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)

/*
 * supp register definitions
 */
#define LPC_SUPP_SPEED (1 << 8)
#define LPC_SUPP_RESET_RMII (1 << 11)

/*
 * test register definitions
 */
#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
#define LPC_TEST_PAUSE (1 << 1)
#define LPC_TEST_BACKPRESSURE (1 << 2)

/*
 * mcfg register definitions
 */
#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
#define LPC_MCFG_CLOCK_HOST_DIV_4 0
#define LPC_MCFG_CLOCK_HOST_DIV_6 2
#define LPC_MCFG_CLOCK_HOST_DIV_8 3
#define LPC_MCFG_CLOCK_HOST_DIV_10 4
#define LPC_MCFG_CLOCK_HOST_DIV_14 5
#define LPC_MCFG_CLOCK_HOST_DIV_20 6
#define LPC_MCFG_CLOCK_HOST_DIV_28 7
#define LPC_MCFG_RESET_MII_MGMT (1 << 15)

/*
 * mcmd register definitions
 */
#define LPC_MCMD_READ (1 << 0)
#define LPC_MCMD_SCAN (1 << 1)

/*
 * madr register definitions
 */
#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)

/*
 * mwtd register definitions
 */
#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)

/*
 * mrdd register definitions
 */
#define LPC_MRDD_READ_MASK 0xFFFF

/*
 * mind register definitions
 */
#define LPC_MIND_BUSY (1 << 0)
#define LPC_MIND_SCANNING (1 << 1)
#define LPC_MIND_NOT_VALID (1 << 2)
#define LPC_MIND_MII_LINK_FAIL (1 << 3)

/*
 * command register definitions
 */
#define LPC_COMMAND_RXENABLE (1 << 0)
#define LPC_COMMAND_TXENABLE (1 << 1)
#define LPC_COMMAND_REG_RESET (1 << 3)
#define LPC_COMMAND_TXRESET (1 << 4)
#define LPC_COMMAND_RXRESET (1 << 5)
#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
#define LPC_COMMAND_PASSRXFILTER (1 << 7)
#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
#define LPC_COMMAND_RMII (1 << 9)
#define LPC_COMMAND_FULLDUPLEX (1 << 10)

/*
 * status register definitions
 */
#define LPC_STATUS_RXACTIVE (1 << 0)
#define LPC_STATUS_TXACTIVE (1 << 1)

/*
 * tsv0 register definitions
 */
#define LPC_TSV0_CRC_ERROR (1 << 0)
#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
#define LPC_TSV0_DONE (1 << 3)
#define LPC_TSV0_MULTICAST (1 << 4)
#define LPC_TSV0_BROADCAST (1 << 5)
#define LPC_TSV0_PACKET_DEFER (1 << 6)
#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
#define LPC_TSV0_LATE_COLLISION (1 << 9)
#define LPC_TSV0_GIANT (1 << 10)
#define LPC_TSV0_UNDERRUN (1 << 11)
#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
#define LPC_TSV0_CONTROL_FRAME (1 << 28)
#define LPC_TSV0_PAUSE (1 << 29)
#define LPC_TSV0_BACKPRESSURE (1 << 30)
#define LPC_TSV0_VLAN (1 << 31)

/*
 * tsv1 register definitions
 */
#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)

/*
 * rsv register definitions
 */
#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
#define LPC_RSV_CRC_ERROR (1 << 20)
#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
#define LPC_RSV_RECEIVE_OK (1 << 23)
#define LPC_RSV_MULTICAST (1 << 24)
#define LPC_RSV_BROADCAST (1 << 25)
#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
#define LPC_RSV_CONTROL_FRAME (1 << 27)
#define LPC_RSV_PAUSE (1 << 28)
#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
#define LPC_RSV_VLAN (1 << 30)

/*
 * flowcontrolcounter register definitions
 */
#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)

/*
 * flowcontrolstatus register definitions
 */
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)

/*
 * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
 * register definitions
 */
#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)

/*
 * rxfilterctrl register definitions
 */
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)

/*
 * rxfilterwolstatus/rxfilterwolclear register definitions
 */
#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)

/*
 * intstatus, intenable, intclear, and intset shared register
 * definitions
 */
#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
#define LPC_MACINT_RXERRORONINT (1 << 1)
#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
#define LPC_MACINT_RXDONEINTEN (1 << 3)
#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
#define LPC_MACINT_TXERRORINTEN (1 << 5)
#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
#define LPC_MACINT_TXDONEINTEN (1 << 7)
#define LPC_MACINT_SOFTINTEN (1 << 12)
#define LPC_MACINT_WAKEUPINTEN (1 << 13)

/*
 * powerdown register definitions
 */
#define LPC_POWERDOWN_MACAHB (1 << 31)
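
/*
 * Helpers for the optional device tree properties used by this driver:
 * "phy-mode" selects MII (defaulting to RMII when absent or not "mii"),
 * and "use-iram" asks for the on-chip IRAM to hold the DMA buffers.
 */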
static phy_interface_t lpc_phy_interface_mode(struct device *dev)
{
	if (dev && dev->of_node) {
		const char *mode = of_get_property(dev->of_node,
						   "phy-mode", NULL);
		if (mode && !strcmp(mode, "mii"))
			return PHY_INTERFACE_MODE_MII;
	}
	return PHY_INTERFACE_MODE_RMII;
}

static bool use_iram_for_net(struct device *dev)
{
	if (dev && dev->of_node)
		return of_property_read_bool(dev->of_node, "use-iram");
	return false;
}
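
/*
 * Illustrative device tree node for this driver (the compatible string
 * and property names come from lpc_eth_match, the lookups above and the
 * of_get_mac_address() call in the probe routine; the register address
 * and interrupt specifier below are board/SoC-specific placeholders):
 *
 *	ethernet@31060000 {
 *		compatible = "nxp,lpc-eth";
 *		reg = <0x31060000 0x1000>;
 *		interrupts = <29 0>;
 *		phy-mode = "rmii";
 *		use-iram;
 *	};
 */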

/* Receive Status information word */
#define RXSTATUS_SIZE 0x000007FF
#define RXSTATUS_CONTROL (1 << 18)
#define RXSTATUS_VLAN (1 << 19)
#define RXSTATUS_FILTER (1 << 20)
#define RXSTATUS_MULTICAST (1 << 21)
#define RXSTATUS_BROADCAST (1 << 22)
#define RXSTATUS_CRC (1 << 23)
#define RXSTATUS_SYMBOL (1 << 24)
#define RXSTATUS_LENGTH (1 << 25)
#define RXSTATUS_RANGE (1 << 26)
#define RXSTATUS_ALIGN (1 << 27)
#define RXSTATUS_OVERRUN (1 << 28)
#define RXSTATUS_NODESC (1 << 29)
#define RXSTATUS_LAST (1 << 30)
#define RXSTATUS_ERROR (1 << 31)

#define RXSTATUS_STATUS_ERROR \
	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)

/* Receive Descriptor control word */
#define RXDESC_CONTROL_SIZE 0x000007FF
#define RXDESC_CONTROL_INT (1 << 31)

/* Transmit Status information word */
#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
#define TXSTATUS_DEFER (1 << 25)
#define TXSTATUS_EXCESSDEFER (1 << 26)
#define TXSTATUS_EXCESSCOLL (1 << 27)
#define TXSTATUS_LATECOLL (1 << 28)
#define TXSTATUS_UNDERRUN (1 << 29)
#define TXSTATUS_NODESC (1 << 30)
#define TXSTATUS_ERROR (1 << 31)

/* Transmit Descriptor control word */
#define TXDESC_CONTROL_SIZE 0x000007FF
#define TXDESC_CONTROL_OVERRIDE (1 << 26)
#define TXDESC_CONTROL_HUGE (1 << 27)
#define TXDESC_CONTROL_PAD (1 << 28)
#define TXDESC_CONTROL_CRC (1 << 29)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)

/*
 * Structure of the TX/RX descriptors and RX status
 */
struct txrx_desc_t {
	__le32 packet;
	__le32 control;
};

struct rx_status_t {
	__le32 statusinfo;
	__le32 statushashcrc;
};

/*
 * Device driver data structure
 */
struct netdata_local {
	struct platform_device *pdev;
	struct net_device *ndev;
	spinlock_t lock;
	void __iomem *net_base;
	u32 msg_enable;
	unsigned int skblen[ENET_TX_DESC];
	unsigned int last_tx_idx;
	unsigned int num_used_tx_buffs;
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	struct clk *clk;
	dma_addr_t dma_buff_base_p;
	void *dma_buff_base_v;
	size_t dma_buff_size;
	struct txrx_desc_t *tx_desc_v;
	u32 *tx_stat_v;
	void *tx_buff_v;
	struct txrx_desc_t *rx_desc_v;
	struct rx_status_t *rx_stat_v;
	void *rx_buff_v;
	int link;
	int speed;
	int duplex;
	struct napi_struct napi;
};

/*
 * MAC support functions
 */
static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
{
	u32 tmp;

	/* Set station address */
	tmp = mac[0] | ((u32)mac[1] << 8);
	writel(tmp, LPC_ENET_SA2(pldat->net_base));
	tmp = mac[2] | ((u32)mac[3] << 8);
	writel(tmp, LPC_ENET_SA1(pldat->net_base));
	tmp = mac[4] | ((u32)mac[5] << 8);
	writel(tmp, LPC_ENET_SA0(pldat->net_base));

	netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
}

static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
{
	u32 tmp;

	/* Get station address */
	tmp = readl(LPC_ENET_SA2(pldat->net_base));
	mac[0] = tmp & 0xFF;
	mac[1] = tmp >> 8;
	tmp = readl(LPC_ENET_SA1(pldat->net_base));
	mac[2] = tmp & 0xFF;
	mac[3] = tmp >> 8;
	tmp = readl(LPC_ENET_SA0(pldat->net_base));
	mac[4] = tmp & 0xFF;
	mac[5] = tmp >> 8;
}

static void __lpc_eth_clock_enable(struct netdata_local *pldat,
				   bool enable)
{
	if (enable)
		clk_enable(pldat->clk);
	else
		clk_disable(pldat->clk);
}
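
/*
 * Program the MAC for the duplex and speed currently stored in pldat:
 * the MAC2/COMMAND full-duplex bits and back-to-back IPG for the duplex
 * setting, and the SUPP register for 100 Mbit operation.
 */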
static void __lpc_params_setup(struct netdata_local *pldat)
{
	u32 tmp;

	if (pldat->duplex == DUPLEX_FULL) {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp |= LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp |= LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
	} else {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp &= ~LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp &= ~LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
	}

	if (pldat->speed == SPEED_100)
		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
	else
		writel(0, LPC_ENET_SUPP(pldat->net_base));
}

static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}

static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
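
/*
 * Translate a kernel virtual address inside the driver's contiguous
 * DMA region into the corresponding bus/physical address by applying
 * the region's base offset.
 */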
static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
{
	phys_addr_t phaddr;

	phaddr = addr - pldat->dma_buff_base_v;
	phaddr += pldat->dma_buff_base_p;

	return phaddr;
}

static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}

static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
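
/*
 * The single DMA region allocated at probe time is carved up below in
 * the order: TX descriptors, TX status words, TX packet buffers, RX
 * descriptors, RX status words, RX packet buffers, with 16-byte
 * alignment between the sub-areas.
 */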
/* Setup TX/RX descriptors */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}

static void __lpc_eth_init(struct netdata_local *pldat)
{
	u32 tmp;

	/* Disable controller and reset */
	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
	tmp &= ~(LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE);
	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
	tmp &= ~LPC_MAC1_RECV_ENABLE;
	writel(tmp, LPC_ENET_MAC1(pldat->net_base));

	/* Initial MAC setup */
	writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
	       LPC_ENET_MAC2(pldat->net_base));
	writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));

	/* Collision window, gap */
	writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
		LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
	       LPC_ENET_CLRT(pldat->net_base));
	writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));

	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		writel(LPC_COMMAND_PASSRUNTFRAME,
		       LPC_ENET_COMMAND(pldat->net_base));
	else {
		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
		       LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
	}

	__lpc_params_setup(pldat);

	/* Setup TX and RX descriptors */
	__lpc_txrx_desc_setup(pldat);

	/* Setup packet filtering */
	writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
	       LPC_ENET_RXFILTER_CTRL(pldat->net_base));

	/* Get the next TX buffer output index */
	pldat->num_used_tx_buffs = 0;
	pldat->last_tx_idx =
		readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));

	/* Clear and enable interrupts */
	writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
	smp_wmb();
	lpc_eth_enable_int(pldat->net_base);

	/* Enable controller */
	tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
	tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
	writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
	tmp = readl(LPC_ENET_MAC1(pldat->net_base));
	tmp |= LPC_MAC1_RECV_ENABLE;
	writel(tmp, LPC_ENET_MAC1(pldat->net_base));
}

static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}

/*
 * MAC<--->PHY support functions
 */
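
/*
 * MDIO bus accessors: an access is started by writing the PHY and
 * register address to MADR (plus MCMD for reads), then polling MIND
 * until the BUSY bit clears, with a 100 ms timeout.
 */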
static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int lps;

	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));

	/* Wait for unbusy status */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	lps = readl(LPC_ENET_MRDD(pldat->net_base));
	writel(0, LPC_ENET_MCMD(pldat->net_base));

	return lps;
}

static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
			  u16 phydata)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(phydata, LPC_ENET_MWTD(pldat->net_base));

	/* Wait for completion */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}

static int lpc_mdio_reset(struct mii_bus *bus)
{
	return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
}
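
/*
 * PHY link state callback: mirror the PHY's speed/duplex/link into the
 * driver state under the lock and reprogram the MAC only when something
 * actually changed.
 */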
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	if (status_change)
		__lpc_params_setup(pldat);
}

static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}

static int lpc_mii_init(struct netdata_local *pldat)
{
	int err = -ENXIO, i;

	pldat->mii_bus = mdiobus_alloc();
	if (!pldat->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	/* Setup MII mode */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		writel(LPC_COMMAND_PASSRUNTFRAME,
		       LPC_ENET_COMMAND(pldat->net_base));
	else {
		writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
		       LPC_ENET_COMMAND(pldat->net_base));
		writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
	}

	pldat->mii_bus->name = "lpc_mii_bus";
	pldat->mii_bus->read = &lpc_mdio_read;
	pldat->mii_bus->write = &lpc_mdio_write;
	pldat->mii_bus->reset = &lpc_mdio_reset;
	snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pldat->pdev->name, pldat->pdev->id);
	pldat->mii_bus->priv = pldat;
	pldat->mii_bus->parent = &pldat->pdev->dev;

	pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!pldat->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_1;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		pldat->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(pldat->pdev, pldat->mii_bus);

	if (mdiobus_register(pldat->mii_bus))
		goto err_out_free_mdio_irq;

	if (lpc_mii_probe(pldat->ndev) != 0)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(pldat->mii_bus);
err_out_free_mdio_irq:
	kfree(pldat->mii_bus->irq);
err_out_1:
	mdiobus_free(pldat->mii_bus);
err_out:
	return err;
}
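
/*
 * Reclaim completed TX descriptors: walk from last_tx_idx up to the
 * hardware consume index, account errors and collisions per status
 * word, and wake the queue once at least half of the ring is free.
 */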
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
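
/*
 * Receive up to "budget" frames: for each descriptor between the
 * consume and produce indexes, either record the error or copy the
 * frame out of the DMA buffer into a freshly allocated skb and hand it
 * to the stack, then advance the consume index.
 */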
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
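
/*
 * NAPI poll: reclaim finished TX buffers under the TX queue lock, then
 * receive up to "budget" frames; when the budget is not exhausted,
 * complete NAPI and re-enable the RX/TX done interrupts.
 */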
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}

static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}

static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
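
/*
 * Transmit path: the skb payload is copied into the per-descriptor DMA
 * buffer (the skb itself is never DMA-mapped), the descriptor control
 * word is set up, and the produce index is advanced to start the
 * transfer; the queue is stopped when the ring is about to fill.
 */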
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int lpc_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	struct netdata_local *pldat = netdev_priv(ndev);
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	spin_unlock_irqrestore(&pldat->lock, flags);

	return 0;
}
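
/*
 * Rebuild the RX filter: always accept broadcast and perfect-match
 * unicast, open up for promiscuous/all-multicast modes, and program the
 * 64-bit multicast hash table from bits 23..28 of each address CRC.
 */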
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));

	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}

static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, req, cmd);
}

static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	__lpc_eth_clock_enable(pldat, true);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(pldat->phy_dev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}

/*
 * Ethtool ops
 */
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
	struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}

static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	return pldat->msg_enable;
}

static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
{
	struct netdata_local *pldat = netdev_priv(ndev);

	pldat->msg_enable = level;
}

static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
	struct ethtool_cmd *cmd)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;

	if (!phydev)
		return -EOPNOTSUPP;

	return phy_ethtool_gset(phydev, cmd);
}

static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
	struct ethtool_cmd *cmd)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;

	if (!phydev)
		return -EOPNOTSUPP;

	return phy_ethtool_sset(phydev, cmd);
}

static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
	.get_settings = lpc_eth_ethtool_getsettings,
	.set_settings = lpc_eth_ethtool_setsettings,
	.get_msglevel = lpc_eth_ethtool_getmsglevel,
	.set_msglevel = lpc_eth_ethtool_setmsglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open = lpc_eth_open,
	.ndo_stop = lpc_eth_close,
	.ndo_start_xmit = lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode = lpc_eth_set_multicast_list,
	.ndo_do_ioctl = lpc_eth_ioctl,
	.ndo_set_mac_address = lpc_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
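
/*
 * Probe: select MII/RMII pin muxing, map the controller, claim the
 * clock and interrupt, place the DMA descriptors and buffers either in
 * on-chip IRAM or in a coherent SDRAM allocation, pick a MAC address
 * (hardware, device tree, or random), and finally register the netdev
 * and MDIO bus.
 */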
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *ndev;
	struct netdata_local *pldat;
	struct phy_device *phydev;
	dma_addr_t dma_handle;
	int irq, ret;
	u32 tmp;

	/* Setup network interface for RMII or MII mode */
	tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
	tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
	if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
	else
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
	__raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
		dev_err(&pdev->dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(&pdev->dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	__lpc_eth_clock_enable(pldat, true);

	/* Map IO space */
	pldat->net_base = ioremap(res->start, res->end - res->start + 1);
	if (!pldat->net_base) {
		dev_err(&pdev->dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Get size of DMA buffers/descriptors region */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
	pldat->dma_buff_base_v = 0;

	if (use_iram_for_net(&pldat->pdev->dev)) {
		dma_handle = LPC32XX_IRAM_BASE;
		if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
			pldat->dma_buff_base_v =
				io_p2v(LPC32XX_IRAM_BASE);
		else
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
	}

	if (pldat->dma_buff_base_v == 0) {
		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a chunk of memory for the DMA ethernet buffers
		   and descriptors */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(&pldat->pdev->dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	netdev_dbg(ndev, "IO address start :0x%08x\n",
			res->start);
	netdev_dbg(ndev, "IO address size :%d\n",
			res->end - res->start + 1);
	netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
			pldat->net_base);
	netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
			pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
			pldat->dma_buff_base_v);

	/* Get MAC address from current HW setting (POR state is all zeros) */
	__lpc_get_mac(pldat, ndev->dev_addr);

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
		if (macaddr)
			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Reset the ethernet controller */
	__lpc_eth_reset(pldat);

	/* then shut everything down to save power */
	__lpc_eth_shutdown(pldat);

	/* Set default parameters */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Force default PHY interface setup in chip, this will probably be
	   changed by the PHY driver */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
		    res->start, ndev->irq);

	phydev = pldat->phy_dev;

	device_init_wakeup(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, 0);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}

static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
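/*
 * Legacy platform PM hooks: on suspend the interface is detached, the
 * MAC shut down and its clock gated; on resume the clock is re-enabled
 * and the controller fully re-initialized before re-attaching. The IRQ
 * is armed as a wakeup source only when the device may wake the system.
 */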
static int lpc_eth_drv_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(ndev->irq);

	if (ndev) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			__lpc_eth_shutdown(pldat);
			clk_disable(pldat->clk);

			/*
			 * Reset again now that the clock is disabled to be
			 * sure EMC_MDC is down
			 */
			__lpc_eth_reset(pldat);
		}
	}

	return 0;
}

static int lpc_eth_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat;

	if (device_may_wakeup(&pdev->dev))
		disable_irq_wake(ndev->irq);

	if (ndev) {
		if (netif_running(ndev)) {
			pldat = netdev_priv(ndev);

			/* Enable interface clock */
			clk_enable(pldat->clk);

			/* Reset and initialize */
			__lpc_eth_reset(pldat);
			__lpc_eth_init(pldat);

			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_OF
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
#endif

static struct platform_driver lpc_eth_driver = {
	.probe = lpc_eth_drv_probe,
	.remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend = lpc_eth_drv_suspend,
	.resume = lpc_eth_drv_resume,
#endif
	.driver = {
		.name = MODNAME,
		.of_match_table = of_match_ptr(lpc_eth_match),
	},
};

module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");