/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to
 * stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)

/* If the packet size is less than this number, we allocate a small skb and
 * copy the packet contents into it instead of pushing the original, bigger
 * skb up the stack.
 */
#define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree.
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
#define EMAC_BOOT_LIST_SIZE     4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
                                             const char *error)
{
        if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
                                  EMAC_FTR_460EX_PHY_CLK_FIX |
                                  EMAC_FTR_440EP_PHY_CLK_FIX))
                DBG(dev, "%s" NL, error);
        else if (net_ratelimit())
                printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
                        error);
}

/* EMAC PHY clock workaround:
 * 440EP/440GR has a more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock individually.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10         1230
#define STOP_TIMEOUT_100        124
#define STOP_TIMEOUT_1000       13
#define STOP_TIMEOUT_1000_JUMBO 73

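/* Back-of-the-envelope check for the values above (not from the original
 * source): a maximum-length frame is 1518 bytes + 8 bytes preamble/SFD +
 * 12 bytes inter-frame gap = 1538 octets = 12304 bit times, i.e. ~1230 us
 * at 10 Mb/s and ~124 us at 100 Mb/s; at 1 Gb/s the same frame takes
 * ~13 us and a 9018-byte jumbo frame ~73 us.
 */
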
static unsigned char default_mcast_addr[] = {
        0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};

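/* 01:80:C2:00:00:01 is the reserved IEEE 802.3x MAC-control (PAUSE)
 * destination address; emac_open() adds it to the multicast filter so
 * that incoming flow-control frames are not filtered out.
 */
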
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
        return  phy_mode == PHY_MODE_GMII ||
                phy_mode == PHY_MODE_RGMII ||
                phy_mode == PHY_MODE_SGMII ||
                phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return  phy_mode == PHY_MODE_SGMII ||
                phy_mode == PHY_MODE_TBI ||
                phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "tx_enable" NL);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

static void emac_tx_disable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "tx_disable" NL);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
}

static void emac_rx_enable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
                goto out;

        DBG(dev, "rx_enable" NL);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress */
                        int n = dev->stop_timeout;
                        while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                                udelay(1);
                                --n;
                        }
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
 out:
        ;
}

static void emac_rx_disable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "rx_disable" NL);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = dev->stop_timeout;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                        udelay(1);
                        --n;
                }
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
}

static inline void emac_netif_stop(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        netif_addr_lock(dev->ndev);
        dev->no_mcast = 1;
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);
        dev->ndev->trans_start = jiffies;       /* prevent tx timeout */
        mal_poll_disable(dev->mal, &dev->commac);
        netif_tx_disable(dev->ndev);
}

static inline void emac_netif_start(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        netif_addr_lock(dev->ndev);
        dev->no_mcast = 0;
        if (dev->mcast_pending && netif_running(dev->ndev))
                __emac_set_multicast_list(dev);
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);

        netif_wake_queue(dev->ndev);

        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (taken from tg3... though the case where that is wrong is
         * not terribly harmful)
         */
        mal_poll_enable(dev->mal, &dev->commac);
}

static inline void emac_rx_disable_async(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        DBG(dev, "rx_disable_async" NL);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

static int emac_reset(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n = 20;

        DBG(dev, "reset" NL);

        if (!dev->reset_failed) {
                /* A 40x erratum suggests stopping the RX channel before the
                 * reset; we stop TX as well.
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

#ifdef CONFIG_PPC_DCR_NATIVE
        /* Enable internal clock source */
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_ETH_CFG,
                            0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;

#ifdef CONFIG_PPC_DCR_NATIVE
        /* Enable external clock source */
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_ETH_CFG,
                            SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}

static void emac_hash_mc(struct emac_instance *dev)
{
        const int regs = EMAC_XAHT_REGS(dev);
        u32 *gaht_base = emac_gaht_base(dev);
        u32 gaht_temp[regs];
        struct netdev_hw_addr *ha;
        int i;

        DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

        memset(gaht_temp, 0, sizeof (gaht_temp));

        netdev_for_each_mc_addr(ha, dev->ndev) {
                int slot, reg, mask;
                DBG2(dev, "mc %pM" NL, ha->addr);

                slot = EMAC_XAHT_CRC_TO_SLOT(dev,
                                             ether_crc(ETH_ALEN, ha->addr));
                reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
                mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

                gaht_temp[reg] |= mask;
        }

        for (i = 0; i < regs; i++)
                out_be32(gaht_base + i, gaht_temp[i]);
}

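/* Note on the hash filter above: each multicast address is reduced to a
 * single bit position ("slot") in the group address hash table (GAHT),
 * derived from the top bits of the Ethernet CRC of the address, and the
 * MAC accepts a frame when the bit selected by its destination address is
 * set. The filter can therefore accept extra (false-positive) multicast
 * frames; the network stack discards those.
 */
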
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        u32 r;

        r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r |= EMAC4_RMR_BASE;
        else
                r |= EMAC_RMR_BASE;

        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI ||
                 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
                r |= EMAC_RMR_PMME;
        else if (!netdev_mc_empty(ndev))
                r |= EMAC_RMR_MAE;

        return r;
}

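/* Summary of the receive-mode bits chosen above: PME accepts everything
 * (promiscuous), PMME accepts all multicast (used when IFF_ALLMULTI is set
 * or the multicast list outgrows the hash table), and MAE enables the GAHT
 * hash match for a non-empty multicast list.
 */
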
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
        u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

        DBG2(dev, "__emac_calc_base_mr1" NL);

        switch(tx_size) {
        case 2048:
                ret |= EMAC_MR1_TFS_2K;
                break;
        default:
                printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
                       dev->ndev->name, tx_size);
        }

        switch(rx_size) {
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
        default:
                printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
                       dev->ndev->name, rx_size);
        }

        return ret;
}

static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
        u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
                EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

        DBG2(dev, "__emac4_calc_base_mr1" NL);

        switch(tx_size) {
        case 16384:
                ret |= EMAC4_MR1_TFS_16K;
                break;
        case 4096:
                ret |= EMAC4_MR1_TFS_4K;
                break;
        case 2048:
                ret |= EMAC4_MR1_TFS_2K;
                break;
        default:
                printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
                       dev->ndev->name, tx_size);
        }

        switch(rx_size) {
        case 16384:
                ret |= EMAC4_MR1_RFS_16K;
                break;
        case 4096:
                ret |= EMAC4_MR1_RFS_4K;
                break;
        case 2048:
                ret |= EMAC4_MR1_RFS_2K;
                break;
        default:
                printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
                       dev->ndev->name, rx_size);
        }

        return ret;
}

static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
        return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
                __emac4_calc_base_mr1(dev, tx_size, rx_size) :
                __emac_calc_base_mr1(dev, tx_size, rx_size);
}

static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
        else
                return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

static inline u32 emac_calc_rwmr(struct emac_instance *dev,
                                 unsigned int low, unsigned int high)
{
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                return (low << 22) | ((high & 0x3ff) << 6);
        else
                return (low << 23) | ((high & 0x1ff) << 7);
}

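/* Both watermarks are expressed in FIFO entries, not bytes. As a worked
 * example (assuming the common 16-byte fifo_entry_size): with a 16 kB RX
 * FIFO, emac_configure() below passes low = 16384/8/16 = 128 and
 * high = 16384/4/16 = 256, which the EMAC4 encoding above packs into
 * bits 22 and up, and bits 6..15 of RWMR, respectively.
 */
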
static int emac_configure(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
        u32 r, mr1 = 0;

        DBG(dev, "configure" NL);

        if (!link) {
                out_be32(&p->mr1, in_be32(&p->mr1)
                         | EMAC_MR1_FDE | EMAC_MR1_ILE);
                udelay(100);
        } else if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_reset(dev->tah_dev);

        DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
            link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

        /* Default fifo sizes */
        tx_size = dev->tx_fifo_size;
        rx_size = dev->rx_fifo_size;

        /* No link, force loopback */
        if (!link)
                mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

        /* Check for full duplex */
        else if (dev->phy.duplex == DUPLEX_FULL)
                mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

        /* Adjust fifo sizes, mr1 and timeouts based on link speed */
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
                                (dev->phy.gpcs_address != 0xffffffff) ?
                                 dev->phy.gpcs_address : dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
                } else
                        mr1 |= EMAC_MR1_MF_1000;

                /* Extended fifo sizes */
                tx_size = dev->tx_fifo_size_gige;
                rx_size = dev->rx_fifo_size_gige;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                                mr1 |= EMAC4_MR1_JPSM;
                        else
                                mr1 |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                mr1 |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                break;
        default: /* make gcc happy */
                break;
        }

        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
                                dev->phy.speed);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

        /* An erratum on 40x forces us to NOT use integrated flow control;
         * let's hope it works on 44x ;)
         */
        if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
            dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        mr1 |= EMAC_MR1_APP;
        }

        /* Add base settings & fifo sizes & program MR1 */
        mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
        out_be32(&p->mr1, mr1);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                               tx_size / 2 / dev->fifo_entry_size);
        else
                r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                              tx_size / 2 / dev->fifo_entry_size);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

        /* A PAUSE frame is sent when the RX FIFO reaches its high-water
         * mark; there should still be enough space left in the FIFO to
         * give our link partner time to process this frame and also time
         * to send a PAUSE frame itself.
         *
         * Here is the worst case scenario for the RX FIFO "headroom"
         * (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
         *
         * 1) One maximum-length frame on TX                    1522 bytes
         * 2) One PAUSE frame time                                64 bytes
         * 3) PAUSE frame decode time allowance                   64 bytes
         * 4) One maximum-length frame on RX                    1522 bytes
         * 5) Round-trip propagation delay of the link (100Mb)    15 bytes
         *                                                  ----------
         *                                                  3187 bytes
         *
         * I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
         * and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
         */
        r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
                           rx_size / 4 / dev->fifo_entry_size);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                EMAC_ISR_IRE | EMAC_ISR_TE;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
                                                        EMAC4_ISR_RXOE | */;
        out_be32(&p->iser,  r);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode)) {
                if (dev->phy.gpcs_address != 0xffffffff)
                        emac_mii_reset_gpcs(&dev->phy);
                else
                        emac_mii_reset_phy(&dev->phy);
        }

        return 0;
}

static void emac_reinitialize(struct emac_instance *dev)
{
        DBG(dev, "reinitialize" NL);

        emac_netif_stop(dev);
        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
        emac_netif_start(dev);
}

static void emac_full_tx_reset(struct emac_instance *dev)
{
        DBG(dev, "full_tx_reset" NL);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
}

static void emac_reset_work(struct work_struct *work)
{
        struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

        DBG(dev, "reset_work" NL);

        mutex_lock(&dev->link_lock);
        if (dev->opened) {
                emac_netif_stop(dev);
                emac_full_tx_reset(dev);
                emac_netif_start(dev);
        }
        mutex_unlock(&dev->link_lock);
}

static void emac_tx_timeout(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);

        DBG(dev, "tx_timeout" NL);

        schedule_work(&dev->reset_work);
}

static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
        int done = !!(stacr & EMAC_STACR_OC);

        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                done = !done;

        return done;
}

static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to become idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue read command */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_READ;
        else
                r |= EMAC_STACR_STAC_READ;
        r |= (reg & EMAC_STACR_PRA_MASK)
                | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for read to complete */
        n = 200;
        while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
                err = -EREMOTEIO;
                goto bail;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

        DBG2(dev, "mdio_read -> %04x" NL, r);
        err = 0;
 bail:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);

        return err == 0 ? r : err;
}

static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to be idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue write command */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_WRITE;
        else
                r |= EMAC_STACR_STAC_WRITE;
        r |= (reg & EMAC_STACR_PRA_MASK) |
                ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                (val << EMAC_STACR_PHYD_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for write to complete */
        n = 200;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }
        err = 0;
 bail:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int res;

        res = __emac_mdio_read((dev->mdio_instance &&
                                dev->phy.gpcs_address != id) ?
                                dev->mdio_instance : dev,
                               (u8) id, (u8) reg);
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct emac_instance *dev = netdev_priv(ndev);

        __emac_mdio_write((dev->mdio_instance &&
                           dev->phy.gpcs_address != id) ?
                           dev->mdio_instance : dev,
                          (u8) id, (u8) reg, (u16) val);
}

/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(dev->ndev);

        DBG(dev, "__multicast %08x" NL, rmr);

        /* I decided to relax register access rules here to avoid
         * a full EMAC reset.
         *
         * There is a real problem with the EMAC4 core if we use the
         * MWSW_001 bit in the MR1 register and then do a full EMAC reset.
         * One TX BD status update is delayed and, after the EMAC reset, it
         * never happens, resulting in a hung TX queue (it'll be recovered
         * by the TX timeout handler eventually, but this is just gross).
         * So we either have to do a full TX reset or try to cheat here :)
         *
         * The only required change is to the RX mode register, so I *think*
         * all we need is just to stop the RX channel. This seems to work on
         * all tested SoCs. --ebs
         *
         * If we need the full reset, we might just trigger the workqueue
         * and do it async... a bit nasty but should work --BenH
         */
        dev->mcast_pending = 0;
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}

/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);

        DBG(dev, "multicast" NL);

        BUG_ON(!netif_running(dev->ndev));

        if (dev->no_mcast) {
                dev->mcast_pending = 1;
                return;
        }
        __emac_set_multicast_list(dev);
}

static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        mutex_lock(&dev->link_lock);
        emac_netif_stop(dev);
        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
 skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev);
        }

        mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
        /* Restart RX */
        clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
        dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
        emac_rx_enable(dev);
        emac_netif_start(dev);
        mutex_unlock(&dev->link_lock);

        return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
                return -EINVAL;

        DBG(dev, "change_mtu(%d)" NL, new_mtu);

        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }

        return ret;
}

static void emac_clean_tx_ring(struct emac_instance *dev)
{
        int i;

        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}

static void emac_clean_rx_ring(struct emac_instance *dev)
{
        int i;

        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}

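/* Note on the "+ 2" offsets used below (and in emac_resize_rx_ring()
 * above): the skb data pointer is advanced by two bytes on top of
 * EMAC_RX_SKB_HEADROOM so that the 14-byte Ethernet header leaves the IP
 * header 4-byte aligned, and the DMA address programmed into the BD is
 * offset by the same two bytes.
 */
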
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;
        wmb();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}

static void emac_print_link_status(struct emac_instance *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int err, i;

        DBG(dev, "open" NL);

        /* Setup error IRQ handler */
        err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->emac_irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
        clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
        dev->rx_sg_skb = NULL;

        mutex_lock(&dev->link_lock);
        dev->opened = 1;

        /* Start PHY polling now.
         */
        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        emac_rx_clk_default(dev);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                dev->link_polling = 1;
                wmb();
                schedule_delayed_work(&dev->link_work, link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        /* Required for Pause packet support in EMAC */
        dev_mc_add_global(ndev, default_mcast_addr);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        emac_netif_start(dev);

        mutex_unlock(&dev->link_lock);

        return 0;
 oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->emac_irq, dev);

        return -ENOMEM;
}

/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & EMAC_MR1_MF_1000)
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

static void emac_link_timer(struct work_struct *work)
{
        struct emac_instance *dev =
                container_of(to_delayed_work(work),
                             struct emac_instance, link_work);
        int link_poll_interval;

        mutex_lock(&dev->link_lock);
        DBG2(dev, "link timer" NL);

        if (!dev->opened)
                goto bail;

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        emac_rx_clk_default(dev);
                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        netif_carrier_on(dev->ndev);
                        emac_netif_stop(dev);
                        emac_full_tx_reset(dev);
                        emac_netif_start(dev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        netif_tx_disable(dev->ndev);
                        emac_reinitialize(dev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
        mutex_unlock(&dev->link_lock);
}

static void emac_force_link_update(struct emac_instance *dev)
{
        netif_carrier_off(dev->ndev);
        smp_rmb();
        if (dev->link_polling) {
                cancel_delayed_work_sync(&dev->link_work);
                if (dev->link_polling)
                        schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
        }
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);

        DBG(dev, "close" NL);

        if (dev->phy.address >= 0) {
                dev->link_polling = 0;
                cancel_delayed_work_sync(&dev->link_work);
        }
        mutex_lock(&dev->link_lock);
        emac_netif_stop(dev);
        dev->opened = 0;
        mutex_unlock(&dev->link_lock);

        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);

        free_irq(dev->emac_irq, dev);

        netif_carrier_off(ndev);

        return 0;
}

static inline u16 emac_tx_csum(struct emac_instance *dev,
                               struct sk_buff *skb)
{
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
            (skb->ip_summed == CHECKSUM_PARTIAL)) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
        return 0;
}

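/* CHECKSUM_PARTIAL means the stack left the checksum field for hardware
 * to fill in; on TAH-equipped EMACs the EMAC_TX_CTRL_TAH_CSUM descriptor
 * bit asks the TAH offload engine to compute it. Otherwise the checksum
 * has already been computed in software and nothing extra is needed.
 */
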
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out. If the if makes a significant perf
         * difference, then we can store the TMR0 value in "dev"
         * instead
         */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
        else
                out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2(dev, "stopped TX queue" NL);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return NETDEV_TX_OK;
}

/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2(dev, "xmit(%u) %d" NL, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
                                                     skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        wmb();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}

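/* emac_xmit_split() below walks a DMA-contiguous region, carving it into
 * MAL_MAX_TX_SIZE-sized chunks and writing one ready BD per chunk; only
 * the chunk that ends the frame gets MAL_TX_CTRL_LAST. It returns the
 * last slot it used so the caller can chain the skb fragments together.
 */
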
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(frag);

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
                                      DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        wmb();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

 undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

 stop_queue:
        netif_stop_queue(ndev);
        DBG2(dev, "stopped TX queue" NL);
        return NETDEV_TX_BUSY;
}

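/* Ordering note on emac_start_xmit_sg() above: the first BD of the frame
 * (dev->tx_slot) is the last one to be marked READY. All of the
 * continuation BDs are filled in first, and the wmb() ensures they are
 * visible before the head descriptor is handed to the MAL, so the
 * hardware can never observe a half-built frame.
 */
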
/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
        struct emac_error_stats *st = &dev->estats;

        DBG(dev, "BD TX error %04x" NL, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
        struct emac_instance *dev = param;
        u32 bad_mask;

        DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                bad_mask = EMAC_IS_BAD_TX_TAH;
        else
                bad_mask = EMAC_IS_BAD_TX;

        netif_tx_lock_bh(dev->ndev);
        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
        again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(ctrl & bad_mask))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2(dev, "tx %d pkts" NL, n);
                }
        }
        netif_tx_unlock_bh(dev->ndev);
}

static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];

        DBG2(dev, "recycle %d %d" NL, slot, len);

        if (len)
                dma_map_single(&dev->ofdev->dev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        wmb();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
        struct emac_error_stats *st = &dev->estats;

        DBG(dev, "BD RX error %04x" NL, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct emac_instance *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}

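/* The ctrl value passed to emac_rx_csum() has already been masked with
 * EMAC_BAD_RX_MASK by emac_poll_rx(), so a zero here means the TAH found
 * no checksum error and the skb can be marked CHECKSUM_UNNECESSARY.
 */
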
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}

/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
        struct emac_instance *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                mb();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len < ETH_HLEN) {
                        ++dev->estats.rx_dropped_stack;
                        emac_recycle_rx_skb(dev, slot, len);
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
        push_packet:
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
        next:
                ++dev->stats.rx_packets;
        skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
        sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG(dev, "rx OOM %d" NL, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
        oom:
                DBG(dev, "rx OOM %d" NL, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2(dev, "rx %d BDs" NL, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
                mb();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2(dev, "rx restart" NL);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2(dev, "dropping partial rx packet" NL);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
                mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = 0;
        }
        return received;
}

/* NAPI poll context */
static int emac_peek_rx(void *param)
{
        struct emac_instance *dev = param;

        return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
        struct emac_instance *dev = param;

        int slot = dev->rx_slot;
        while (1) {
                u16 ctrl = dev->rx_desc[slot].ctrl;
                if (ctrl & MAL_RX_CTRL_EMPTY)
                        return 0;
                else if (ctrl & MAL_RX_CTRL_LAST)
                        return 1;

                slot = (slot + 1) % NUM_RX_BUFF;

                /* I'm just being paranoid here :) */
                if (unlikely(slot == dev->rx_slot))
                        return 0;
        }
}

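/* Unlike emac_peek_rx(), the SG variant above only reports work when a
 * descriptor carrying MAL_RX_CTRL_LAST is present, i.e. when at least one
 * complete (possibly multi-BD) frame is ready, so the poller is not woken
 * for a partially received scatter/gather frame.
 */
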
/* Hard IRQ */
static void emac_rxde(void *param)
{
        struct emac_instance *dev = param;

        ++dev->estats.rx_stopped;
        emac_rx_disable_async(dev);
}

/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
        struct emac_instance *dev = dev_instance;
        struct emac_regs __iomem *p = dev->emacp;
        struct emac_error_stats *st = &dev->estats;
        u32 isr;

        spin_lock(&dev->lock);

        isr = in_be32(&p->isr);
        out_be32(&p->isr, isr);

        DBG(dev, "isr = %08x" NL, isr);

        if (isr & EMAC4_ISR_TXPE)
                ++st->tx_parity;
        if (isr & EMAC4_ISR_RXPE)
                ++st->rx_parity;
        if (isr & EMAC4_ISR_TXUE)
                ++st->tx_underrun;
        if (isr & EMAC4_ISR_RXOE)
                ++st->rx_fifo_overrun;
        if (isr & EMAC_ISR_OVR)
                ++st->rx_overrun;
        if (isr & EMAC_ISR_BP)
                ++st->rx_bad_packet;
        if (isr & EMAC_ISR_RP)
                ++st->rx_runt_packet;
        if (isr & EMAC_ISR_SE)
                ++st->rx_short_event;
        if (isr & EMAC_ISR_ALE)
                ++st->rx_alignment_error;
        if (isr & EMAC_ISR_BFCS)
                ++st->rx_bad_fcs;
        if (isr & EMAC_ISR_PTLE)
                ++st->rx_packet_too_long;
        if (isr & EMAC_ISR_ORE)
                ++st->rx_out_of_range;
        if (isr & EMAC_ISR_IRE)
                ++st->rx_in_range;
        if (isr & EMAC_ISR_SQE)
                ++st->tx_sqe;
        if (isr & EMAC_ISR_TE)
                ++st->tx_errors;

        spin_unlock(&dev->lock);

        return IRQ_HANDLED;
}

static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        struct emac_stats *st = &dev->stats;
        struct emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;
        unsigned long flags;

        DBG2(dev, "stats" NL);

        /* Compute "legacy" statistics */
        spin_lock_irqsave(&dev->lock, flags);
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        spin_unlock_irqrestore(&dev->lock, flags);
        return nst;
}

static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};

/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        mutex_lock(&dev->link_lock);
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        mutex_unlock(&dev->link_lock);

        return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct emac_instance *dev = netdev_priv(ndev);
        u32 f = dev->phy.features;

        DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;
        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;
        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE) {
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
                        if (cmd->duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                mutex_lock(&dev->link_lock);
                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
                                                cmd->duplex);
                mutex_unlock(&dev->link_lock);

        } else {
                if (!(f & SUPPORTED_Autoneg))
                        return -EINVAL;

                mutex_lock(&dev->link_lock);
                dev->phy.def->ops->setup_aneg(&dev->phy,
                                              (cmd->advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
                mutex_unlock(&dev->link_lock);
        }
        emac_force_link_update(dev);

        return 0;
}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
                                       struct ethtool_ringparam *rp)
{
        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct emac_instance *dev = netdev_priv(ndev);

        mutex_lock(&dev->link_lock);
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        mutex_unlock(&dev->link_lock);
}

static int emac_get_regs_len(struct emac_instance *dev)
{
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                return sizeof(struct emac_ethtool_regs_subhdr) +
                        EMAC4_ETHTOOL_REGS_SIZE(dev);
        else
                return sizeof(struct emac_ethtool_regs_subhdr) +
                        EMAC_ETHTOOL_REGS_SIZE(dev);
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int size;

        size = sizeof(struct emac_ethtool_regs_hdr) +
                emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                size += zmii_get_regs_len(dev->zmii_dev);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                size += rgmii_get_regs_len(dev->rgmii_dev);
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                size += tah_get_regs_len(dev->tah_dev);

        return size;
}

2084 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2086 struct emac_ethtool_regs_subhdr *hdr = buf;
2088 hdr->index = dev->cell_index;
2089 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2090 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2091 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2092 return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2093 } else {
2094 hdr->version = EMAC_ETHTOOL_REGS_VER;
2095 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2096 return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2100 static void emac_ethtool_get_regs(struct net_device *ndev,
2101 struct ethtool_regs *regs, void *buf)
2103 struct emac_instance *dev = netdev_priv(ndev);
2104 struct emac_ethtool_regs_hdr *hdr = buf;
2106 hdr->components = 0;
2107 buf = hdr + 1;
2109 buf = mal_dump_regs(dev->mal, buf);
2110 buf = emac_dump_regs(dev, buf);
2111 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2112 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2113 buf = zmii_dump_regs(dev->zmii_dev, buf);
2115 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2116 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2117 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2119 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2120 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2121 buf = tah_dump_regs(dev->tah_dev, buf);
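/*
 * Illustrative layout of the dump assembled above (actual sizes depend
 * on the EMAC variant and on which of ZMII/RGMII/TAH are present):
 *
 *   struct emac_ethtool_regs_hdr   (components bitmask)
 *   MAL registers                  (mal_dump_regs)
 *   subhdr + EMAC/EMAC4 registers  (emac_dump_regs)
 *   [ZMII registers]               (EMAC_FTR_HAS_ZMII only)
 *   [RGMII registers]              (EMAC_FTR_HAS_RGMII only)
 *   [TAH registers]                (EMAC_FTR_HAS_TAH only)
 */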
2125 static int emac_ethtool_nway_reset(struct net_device *ndev)
2127 struct emac_instance *dev = netdev_priv(ndev);
2128 int res = 0;
2130 DBG(dev, "nway_reset" NL);
2132 if (dev->phy.address < 0)
2133 return -EOPNOTSUPP;
2135 mutex_lock(&dev->link_lock);
2136 if (!dev->phy.autoneg) {
2137 res = -EINVAL;
2138 goto out;
2141 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2142 out:
2143 mutex_unlock(&dev->link_lock);
2144 emac_force_link_update(dev);
2145 return res;
2148 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2150 if (stringset == ETH_SS_STATS)
2151 return EMAC_ETHTOOL_STATS_COUNT;
2152 else
2153 return -EINVAL;
2156 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2157 u8 *buf)
2159 if (stringset == ETH_SS_STATS)
2160 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2163 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2164 struct ethtool_stats *estats,
2165 u64 *tmp_stats)
2167 struct emac_instance *dev = netdev_priv(ndev);
2169 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2170 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2171 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2174 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2175 struct ethtool_drvinfo *info)
2177 struct emac_instance *dev = netdev_priv(ndev);
2179 strcpy(info->driver, "ibm_emac");
2180 strcpy(info->version, DRV_VERSION);
2181 info->fw_version[0] = '\0';
2182 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2183 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2184 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2187 static const struct ethtool_ops emac_ethtool_ops = {
2188 .get_settings = emac_ethtool_get_settings,
2189 .set_settings = emac_ethtool_set_settings,
2190 .get_drvinfo = emac_ethtool_get_drvinfo,
2192 .get_regs_len = emac_ethtool_get_regs_len,
2193 .get_regs = emac_ethtool_get_regs,
2195 .nway_reset = emac_ethtool_nway_reset,
2197 .get_ringparam = emac_ethtool_get_ringparam,
2198 .get_pauseparam = emac_ethtool_get_pauseparam,
2200 .get_strings = emac_ethtool_get_strings,
2201 .get_sset_count = emac_ethtool_get_sset_count,
2202 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2204 .get_link = ethtool_op_get_link,
2207 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2209 struct emac_instance *dev = netdev_priv(ndev);
2210 struct mii_ioctl_data *data = if_mii(rq);
2212 DBG(dev, "ioctl %08x" NL, cmd);
2214 if (dev->phy.address < 0)
2215 return -EOPNOTSUPP;
2217 switch (cmd) {
2218 case SIOCGMIIPHY:
2219 data->phy_id = dev->phy.address;
2220 /* Fall through */
2221 case SIOCGMIIREG:
2222 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2223 data->reg_num);
2224 return 0;
2226 case SIOCSMIIREG:
2227 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2228 data->val_in);
2229 return 0;
2230 default:
2231 return -EOPNOTSUPP;
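/*
 * Sketch of the user-space side of these MII ioctls (hypothetical
 * mii-tool style snippet; error handling and the exact ifreq layout
 * are elided):
 *
 *   struct ifreq ifr;
 *   struct mii_ioctl_data *mii = (void *)&ifr.ifr_data;
 *   strcpy(ifr.ifr_name, "eth0");
 *   ioctl(sock, SIOCGMIIPHY, &ifr);   // fills mii->phy_id and, via the
 *                                     // fall-through above, mii->val_out
 *   mii->reg_num = MII_BMSR;
 *   ioctl(sock, SIOCGMIIREG, &ifr);   // mii->val_out = BMSR contents
 */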
2235 struct emac_depentry {
2236 u32 phandle;
2237 struct device_node *node;
2238 struct platform_device *ofdev;
2239 void *drvdata;
2242 #define EMAC_DEP_MAL_IDX 0
2243 #define EMAC_DEP_ZMII_IDX 1
2244 #define EMAC_DEP_RGMII_IDX 2
2245 #define EMAC_DEP_TAH_IDX 3
2246 #define EMAC_DEP_MDIO_IDX 4
2247 #define EMAC_DEP_PREV_IDX 5
2248 #define EMAC_DEP_COUNT 6
2250 static int __devinit emac_check_deps(struct emac_instance *dev,
2251 struct emac_depentry *deps)
2253 int i, there = 0;
2254 struct device_node *np;
2256 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2257 /* no dependency on that item, all right */
2258 if (deps[i].phandle == 0) {
2259 there++;
2260 continue;
2262 /* special case for blist as the dependency might go away */
2263 if (i == EMAC_DEP_PREV_IDX) {
2264 np = *(dev->blist - 1);
2265 if (np == NULL) {
2266 deps[i].phandle = 0;
2267 there++;
2268 continue;
2270 if (deps[i].node == NULL)
2271 deps[i].node = of_node_get(np);
2273 if (deps[i].node == NULL)
2274 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2275 if (deps[i].node == NULL)
2276 continue;
2277 if (deps[i].ofdev == NULL)
2278 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2279 if (deps[i].ofdev == NULL)
2280 continue;
2281 if (deps[i].drvdata == NULL)
2282 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2283 if (deps[i].drvdata != NULL)
2284 there++;
2286 return there == EMAC_DEP_COUNT;
2289 static void emac_put_deps(struct emac_instance *dev)
2291 if (dev->mal_dev)
2292 of_dev_put(dev->mal_dev);
2293 if (dev->zmii_dev)
2294 of_dev_put(dev->zmii_dev);
2295 if (dev->rgmii_dev)
2296 of_dev_put(dev->rgmii_dev);
2297 if (dev->mdio_dev)
2298 of_dev_put(dev->mdio_dev);
2299 if (dev->tah_dev)
2300 of_dev_put(dev->tah_dev);
2303 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2304 unsigned long action, void *data)
2306 /* We are only interested in devices being bound to a driver */
2307 if (action == BUS_NOTIFY_BOUND_DRIVER)
2308 wake_up_all(&emac_probe_wait);
2309 return 0;
2312 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2313 .notifier_call = emac_of_bus_notify
2316 static int __devinit emac_wait_deps(struct emac_instance *dev)
2318 struct emac_depentry deps[EMAC_DEP_COUNT];
2319 int i, err;
2321 memset(&deps, 0, sizeof(deps));
2323 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2324 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2325 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2326 if (dev->tah_ph)
2327 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2328 if (dev->mdio_ph)
2329 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2330 if (dev->blist && dev->blist > emac_boot_list)
2331 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2332 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2333 wait_event_timeout(emac_probe_wait,
2334 emac_check_deps(dev, deps),
2335 EMAC_PROBE_DEP_TIMEOUT);
2336 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2337 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2338 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2339 if (deps[i].node)
2340 of_node_put(deps[i].node);
2341 if (err && deps[i].ofdev)
2342 of_dev_put(deps[i].ofdev);
2344 if (err == 0) {
2345 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2346 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2347 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2348 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2349 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2351 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2352 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2353 return err;
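/*
 * How the wait above works: each dependency (MAL, ZMII, RGMII, TAH,
 * MDIO and the previous EMAC in the boot list) is identified by
 * phandle.  A bus notifier wakes emac_probe_wait whenever any driver
 * binds, and we sleep until emac_check_deps() finds drvdata on every
 * dependency or EMAC_PROBE_DEP_TIMEOUT expires.
 */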
2356 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2357 u32 *val, int fatal)
2359 int len;
2360 const u32 *prop = of_get_property(np, name, &len);
2361 if (prop == NULL || len < sizeof(u32)) {
2362 if (fatal)
2363 printk(KERN_ERR "%s: missing %s property\n",
2364 np->full_name, name);
2365 return -ENODEV;
2367 *val = *prop;
2368 return 0;
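/*
 * Callers use 'fatal' to separate mandatory properties from optional
 * ones with defaults, as in emac_init_config() below:
 *
 *   if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
 *           return -ENXIO;          (required: complain and fail)
 *   if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
 *           dev->max_mtu = 1500;    (optional: fall back to default)
 */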
2371 static int __devinit emac_init_phy(struct emac_instance *dev)
2373 struct device_node *np = dev->ofdev->dev.of_node;
2374 struct net_device *ndev = dev->ndev;
2375 u32 phy_map, adv;
2376 int i;
2378 dev->phy.dev = ndev;
2379 dev->phy.mode = dev->phy_mode;
2381 /* PHY-less configuration.
2382 * XXX I probably should move these settings to the dev tree
2384 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2385 emac_reset(dev);
2390 dev->phy.address = -1;
2391 dev->phy.features = SUPPORTED_MII;
2392 if (emac_phy_supports_gige(dev->phy_mode))
2393 dev->phy.features |= SUPPORTED_1000baseT_Full;
2394 else
2395 dev->phy.features |= SUPPORTED_100baseT_Full;
2396 dev->phy.pause = 1;
2398 return 0;
2401 mutex_lock(&emac_phy_map_lock);
2402 phy_map = dev->phy_map | busy_phy_map;
2404 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2406 dev->phy.mdio_read = emac_mdio_read;
2407 dev->phy.mdio_write = emac_mdio_write;
2409 /* Enable internal clock source */
2410 #ifdef CONFIG_PPC_DCR_NATIVE
2411 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2412 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2413 #endif
2414 /* PHY clock workaround */
2415 emac_rx_clk_tx(dev);
2422 /* Configure EMAC with defaults so we can at least use MDIO
2423 * This is needed mostly for 440GX
2425 if (emac_phy_gpcs(dev->phy.mode)) {
2426 /* XXX
2427 * Make GPCS PHY address equal to EMAC index.
2428 * We probably should take into account busy_phy_map
2429 * and/or phy_map here.
2431 * Note that the busy_phy_map is currently global
2432 * while it should probably be per-ASIC...
2434 dev->phy.gpcs_address = dev->gpcs_address;
2435 if (dev->phy.gpcs_address == 0xffffffff)
2436 dev->phy.address = dev->cell_index;
2439 emac_configure(dev);
2441 if (dev->phy_address != 0xffffffff)
2442 phy_map = ~(1 << dev->phy_address);
2444 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2445 if (!(phy_map & 1)) {
2446 int r;
2447 busy_phy_map |= 1 << i;
2449 /* Quick check if there is a PHY at the address */
2450 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2451 if (r == 0xffff || r < 0)
2452 continue;
2453 if (!emac_mii_phy_probe(&dev->phy, i))
2454 break;
2457 /* Enable external clock source */
2458 #ifdef CONFIG_PPC_DCR_NATIVE
2459 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2460 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2461 #endif
2462 mutex_unlock(&emac_phy_map_lock);
2463 if (i == 0x20) {
2464 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2465 return -ENXIO;
2468 /* Init PHY */
2469 if (dev->phy.def->ops->init)
2470 dev->phy.def->ops->init(&dev->phy);
2472 /* Disable any PHY features not supported by the platform */
2473 dev->phy.def->features &= ~dev->phy_feat_exc;
2475 /* Setup initial link parameters */
2476 if (dev->phy.features & SUPPORTED_Autoneg) {
2477 adv = dev->phy.features;
2478 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2479 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2480 /* Restart autonegotiation */
2481 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2482 } else {
2483 u32 f = dev->phy.def->features;
2484 int speed = SPEED_10, fd = DUPLEX_HALF;
2486 /* Select highest supported speed/duplex */
2487 if (f & SUPPORTED_1000baseT_Full) {
2488 speed = SPEED_1000;
2489 fd = DUPLEX_FULL;
2490 } else if (f & SUPPORTED_1000baseT_Half)
2491 speed = SPEED_1000;
2492 else if (f & SUPPORTED_100baseT_Full) {
2493 speed = SPEED_100;
2494 fd = DUPLEX_FULL;
2495 } else if (f & SUPPORTED_100baseT_Half)
2496 speed = SPEED_100;
2497 else if (f & SUPPORTED_10baseT_Full)
2498 fd = DUPLEX_FULL;
2500 /* Force link parameters */
2501 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2503 return 0;
2506 static int __devinit emac_init_config(struct emac_instance *dev)
2508 struct device_node *np = dev->ofdev->dev.of_node;
2509 const void *p;
2511 /* Read config from device-tree */
2512 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2513 return -ENXIO;
2514 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2515 return -ENXIO;
2516 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2517 return -ENXIO;
2518 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2519 return -ENXIO;
2520 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2521 dev->max_mtu = 1500;
2522 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2523 dev->rx_fifo_size = 2048;
2524 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2525 dev->tx_fifo_size = 2048;
2526 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2527 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2528 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2529 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2530 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2531 dev->phy_address = 0xffffffff;
2532 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2533 dev->phy_map = 0xffffffff;
2534 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2535 dev->gpcs_address = 0xffffffff;
2536 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2537 return -ENXIO;
2538 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2539 dev->tah_ph = 0;
2540 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2541 dev->tah_port = 0;
2542 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2543 dev->mdio_ph = 0;
2544 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2545 dev->zmii_ph = 0;
2546 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2547 dev->zmii_port = 0xffffffff;
2548 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2549 dev->rgmii_ph = 0;
2550 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2551 dev->rgmii_port = 0xffffffff;
2552 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2553 dev->fifo_entry_size = 16;
2554 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2555 dev->mal_burst_size = 256;
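/*
 * Illustrative device-tree node matching the properties read above
 * (labels and values are an example only):
 *
 *   EMAC0: ethernet@ef600800 {
 *           compatible = "ibm,emac4";
 *           cell-index = <0>;
 *           mal-device = <&MAL0>;
 *           mal-tx-channel = <0>;
 *           mal-rx-channel = <0>;
 *           max-frame-size = <1500>;
 *           rx-fifo-size = <2048>;
 *           tx-fifo-size = <2048>;
 *           phy-address = <1>;
 *   };
 */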
2557 /* PHY mode needs some decoding */
2558 dev->phy_mode = of_get_phy_mode(np);
2559 if (dev->phy_mode < 0)
2560 dev->phy_mode = PHY_MODE_NA;
2562 /* Check EMAC version */
2563 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2564 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2565 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2566 of_device_is_compatible(np, "ibm,emac-460gt"))
2567 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2568 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2569 of_device_is_compatible(np, "ibm,emac-405exr"))
2570 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2571 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2572 dev->features |= EMAC_FTR_EMAC4;
2573 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2574 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2575 } else {
2576 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2577 of_device_is_compatible(np, "ibm,emac-440gr"))
2578 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2579 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2580 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2581 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2582 #else
2583 printk(KERN_ERR "%s: Flow control not disabled!\n",
2584 np->full_name);
2585 return -ENXIO;
2586 #endif
2591 /* Fixup some feature bits based on the device tree */
2592 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2593 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2594 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2595 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2597 /* CAB lacks the appropriate properties */
2598 if (of_device_is_compatible(np, "ibm,emac-axon"))
2599 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2600 EMAC_FTR_STACR_OC_INVERT;
2602 /* Enable TAH/ZMII/RGMII features as found */
2603 if (dev->tah_ph != 0) {
2604 #ifdef CONFIG_IBM_EMAC_TAH
2605 dev->features |= EMAC_FTR_HAS_TAH;
2606 #else
2607 printk(KERN_ERR "%s: TAH support not enabled !\n",
2608 np->full_name);
2609 return -ENXIO;
2610 #endif
2613 if (dev->zmii_ph != 0) {
2614 #ifdef CONFIG_IBM_EMAC_ZMII
2615 dev->features |= EMAC_FTR_HAS_ZMII;
2616 #else
2617 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2618 np->full_name);
2619 return -ENXIO;
2620 #endif
2623 if (dev->rgmii_ph != 0) {
2624 #ifdef CONFIG_IBM_EMAC_RGMII
2625 dev->features |= EMAC_FTR_HAS_RGMII;
2626 #else
2627 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2628 np->full_name);
2629 return -ENXIO;
2630 #endif
2633 /* Read MAC-address */
2634 p = of_get_property(np, "local-mac-address", NULL);
2635 if (p == NULL) {
2636 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2637 np->full_name);
2638 return -ENXIO;
2640 memcpy(dev->ndev->dev_addr, p, 6);
2642 /* IAHT and GAHT filter parameterization */
2643 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2644 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2645 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2646 } else {
2647 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2648 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2651 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2652 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2653 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2654 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2655 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2657 return 0;
2660 static const struct net_device_ops emac_netdev_ops = {
2661 .ndo_open = emac_open,
2662 .ndo_stop = emac_close,
2663 .ndo_get_stats = emac_stats,
2664 .ndo_set_rx_mode = emac_set_multicast_list,
2665 .ndo_do_ioctl = emac_ioctl,
2666 .ndo_tx_timeout = emac_tx_timeout,
2667 .ndo_validate_addr = eth_validate_addr,
2668 .ndo_set_mac_address = eth_mac_addr,
2669 .ndo_start_xmit = emac_start_xmit,
2670 .ndo_change_mtu = eth_change_mtu,
2673 static const struct net_device_ops emac_gige_netdev_ops = {
2674 .ndo_open = emac_open,
2675 .ndo_stop = emac_close,
2676 .ndo_get_stats = emac_stats,
2677 .ndo_set_rx_mode = emac_set_multicast_list,
2678 .ndo_do_ioctl = emac_ioctl,
2679 .ndo_tx_timeout = emac_tx_timeout,
2680 .ndo_validate_addr = eth_validate_addr,
2681 .ndo_set_mac_address = eth_mac_addr,
2682 .ndo_start_xmit = emac_start_xmit_sg,
2683 .ndo_change_mtu = emac_change_mtu,
2686 static int __devinit emac_probe(struct platform_device *ofdev)
2688 struct net_device *ndev;
2689 struct emac_instance *dev;
2690 struct device_node *np = ofdev->dev.of_node;
2691 struct device_node **blist = NULL;
2692 int err, i;
2694 /* Skip unused/unwired EMACs. We leave the check for an unused
2695 * property here for now, but new flat device trees should set a
2696 * status property to "disabled" instead.
2698 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2699 return -ENODEV;
2701 /* Find ourselves in the bootlist if we are there */
2702 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2703 if (emac_boot_list[i] == np)
2704 blist = &emac_boot_list[i];
2706 /* Allocate our net_device structure */
2707 err = -ENOMEM;
2708 ndev = alloc_etherdev(sizeof(struct emac_instance));
2709 if (!ndev) {
2710 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2711 np->full_name);
2712 goto err_gone;
2714 dev = netdev_priv(ndev);
2715 dev->ndev = ndev;
2716 dev->ofdev = ofdev;
2717 dev->blist = blist;
2718 SET_NETDEV_DEV(ndev, &ofdev->dev);
2720 /* Initialize some embedded data structures */
2721 mutex_init(&dev->mdio_lock);
2722 mutex_init(&dev->link_lock);
2723 spin_lock_init(&dev->lock);
2724 INIT_WORK(&dev->reset_work, emac_reset_work);
2726 /* Init various config data based on device-tree */
2727 err = emac_init_config(dev);
2728 if (err != 0)
2729 goto err_free;
2731 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2732 dev->emac_irq = irq_of_parse_and_map(np, 0);
2733 dev->wol_irq = irq_of_parse_and_map(np, 1);
2734 if (dev->emac_irq == NO_IRQ) {
2735 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
err = -ENODEV; /* don't return 0 (stale err) on this error path */
2736 goto err_free;
2738 ndev->irq = dev->emac_irq;
2740 /* Map EMAC regs */
2741 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2742 printk(KERN_ERR "%s: Can't get registers address\n",
2743 np->full_name);
err = -ENXIO; /* don't return 0 (stale err) on this error path */
2744 goto err_irq_unmap;
2746 /* TODO: request_mem_region */
2747 dev->emacp = ioremap(dev->rsrc_regs.start,
2748 resource_size(&dev->rsrc_regs));
2749 if (dev->emacp == NULL) {
2750 printk(KERN_ERR "%s: Can't map device registers!\n",
2751 np->full_name);
2752 err = -ENOMEM;
2753 goto err_irq_unmap;
2756 /* Wait for dependent devices */
2757 err = emac_wait_deps(dev);
2758 if (err) {
2759 printk(KERN_ERR
2760 "%s: Timeout waiting for dependent devices\n",
2761 np->full_name);
2762 /* display more info about what's missing? */
2763 goto err_reg_unmap;
2765 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2766 if (dev->mdio_dev != NULL)
2767 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2769 /* Register with MAL */
2770 dev->commac.ops = &emac_commac_ops;
2771 dev->commac.dev = dev;
2772 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2773 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2774 err = mal_register_commac(dev->mal, &dev->commac);
2775 if (err) {
2776 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2777 np->full_name, dev->mal_dev->dev.of_node->full_name);
2778 goto err_rel_deps;
2780 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2781 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2783 /* Get pointers to BD rings */
2784 dev->tx_desc =
2785 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2786 dev->rx_desc =
2787 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2789 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2790 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2792 /* Clean rings */
2793 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2794 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2795 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2796 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
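/*
 * The descriptor rings live in MAL-owned memory: mal_{tx,rx}_bd_offset()
 * locates this channel's descriptors inside the shared bd_virt area,
 * and the memsets above start both rings out empty with no skbs
 * attached yet.
 */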
2798 /* Attach to ZMII, if needed */
2799 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2800 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2801 goto err_unreg_commac;
2803 /* Attach to RGMII, if needed */
2804 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2805 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2806 goto err_detach_zmii;
2808 /* Attach to TAH, if needed */
2809 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2810 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2811 goto err_detach_rgmii;
2813 /* Set some link defaults before we can find out real parameters */
2814 dev->phy.speed = SPEED_100;
2815 dev->phy.duplex = DUPLEX_FULL;
2816 dev->phy.autoneg = AUTONEG_DISABLE;
2817 dev->phy.pause = dev->phy.asym_pause = 0;
2818 dev->stop_timeout = STOP_TIMEOUT_100;
2819 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2821 /* Find PHY if any */
2822 err = emac_init_phy(dev);
2823 if (err != 0)
2824 goto err_detach_tah;
2826 if (dev->tah_dev) {
2827 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2828 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2830 ndev->watchdog_timeo = 5 * HZ;
2831 if (emac_phy_supports_gige(dev->phy_mode)) {
2832 ndev->netdev_ops = &emac_gige_netdev_ops;
2833 dev->commac.ops = &emac_commac_sg_ops;
2834 } else
2835 ndev->netdev_ops = &emac_netdev_ops;
2836 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2838 netif_carrier_off(ndev);
2840 err = register_netdev(ndev);
2841 if (err) {
2842 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2843 np->full_name, err);
2844 goto err_detach_tah;
2847 /* Set our drvdata last as we don't want them visible until we are
2848 * fully initialized
2850 wmb();
2851 dev_set_drvdata(&ofdev->dev, dev);
2853 /* There's a new kid in town! Let's tell everybody */
2854 wake_up_all(&emac_probe_wait);
2857 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2858 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2860 if (dev->phy_mode == PHY_MODE_SGMII)
2861 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2863 if (dev->phy.address >= 0)
2864 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2865 dev->phy.def->name, dev->phy.address);
2867 emac_dbg_register(dev);
2869 /* Life is good */
2870 return 0;
2872 /* I have a bad feeling about this ... */
2874 err_detach_tah:
2875 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2876 tah_detach(dev->tah_dev, dev->tah_port);
2877 err_detach_rgmii:
2878 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2879 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2880 err_detach_zmii:
2881 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2882 zmii_detach(dev->zmii_dev, dev->zmii_port);
2883 err_unreg_commac:
2884 mal_unregister_commac(dev->mal, &dev->commac);
2885 err_rel_deps:
2886 emac_put_deps(dev);
2887 err_reg_unmap:
2888 iounmap(dev->emacp);
2889 err_irq_unmap:
2890 if (dev->wol_irq != NO_IRQ)
2891 irq_dispose_mapping(dev->wol_irq);
2892 if (dev->emac_irq != NO_IRQ)
2893 irq_dispose_mapping(dev->emac_irq);
2894 err_free:
2895 free_netdev(ndev);
2896 err_gone:
2897 /* if we were on the bootlist, remove us as we won't show up and
2898 * wake up all waiters to notify them in case they were waiting
2899 * on us
2901 if (blist) {
2902 *blist = NULL;
2903 wake_up_all(&emac_probe_wait);
2905 return err;
2908 static int __devexit emac_remove(struct platform_device *ofdev)
2910 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2912 DBG(dev, "remove" NL);
2914 dev_set_drvdata(&ofdev->dev, NULL);
2916 unregister_netdev(dev->ndev);
2918 cancel_work_sync(&dev->reset_work);
2920 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2921 tah_detach(dev->tah_dev, dev->tah_port);
2922 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2923 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2924 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2925 zmii_detach(dev->zmii_dev, dev->zmii_port);
2927 busy_phy_map &= ~(1 << dev->phy.address);
2928 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
2930 mal_unregister_commac(dev->mal, &dev->commac);
2931 emac_put_deps(dev);
2933 emac_dbg_unregister(dev);
2934 iounmap(dev->emacp);
2936 if (dev->wol_irq != NO_IRQ)
2937 irq_dispose_mapping(dev->wol_irq);
2938 if (dev->emac_irq != NO_IRQ)
2939 irq_dispose_mapping(dev->emac_irq);
2941 free_netdev(dev->ndev);
2943 return 0;
2946 /* XXX Features in here should be replaced by properties... */
2947 static struct of_device_id emac_match[] =
2950 .type = "network",
2951 .compatible = "ibm,emac",
2954 .type = "network",
2955 .compatible = "ibm,emac4",
2958 .type = "network",
2959 .compatible = "ibm,emac4sync",
2963 MODULE_DEVICE_TABLE(of, emac_match);
2965 static struct platform_driver emac_driver = {
2966 .driver = {
2967 .name = "emac",
2968 .owner = THIS_MODULE,
2969 .of_match_table = emac_match,
2971 .probe = emac_probe,
2972 .remove = emac_remove,
2975 static void __init emac_make_bootlist(void)
2977 struct device_node *np = NULL;
2978 int j, max, i = 0, k;
2979 int cell_indices[EMAC_BOOT_LIST_SIZE];
2981 /* Collect EMACs */
2982 while((np = of_find_all_nodes(np)) != NULL) {
2983 const u32 *idx;
2985 if (of_match_node(emac_match, np) == NULL)
2986 continue;
2987 if (of_get_property(np, "unused", NULL))
2988 continue;
2989 idx = of_get_property(np, "cell-index", NULL);
2990 if (idx == NULL)
2991 continue;
2992 cell_indices[i] = *idx;
2993 emac_boot_list[i++] = of_node_get(np);
2994 if (i >= EMAC_BOOT_LIST_SIZE) {
2995 of_node_put(np);
2996 break;
2999 max = i;
3001 /* Bubble sort them (doh, what a creative algorithm :-) */
3002 for (i = 0; max > 1 && (i < (max - 1)); i++)
3003 for (j = i; j < max; j++) {
3004 if (cell_indices[i] > cell_indices[j]) {
3005 np = emac_boot_list[i];
3006 emac_boot_list[i] = emac_boot_list[j];
3007 emac_boot_list[j] = np;
3008 k = cell_indices[i];
3009 cell_indices[i] = cell_indices[j];
3010 cell_indices[j] = k;
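/*
 * After this sort the boot list follows ascending cell-index, which is
 * what EMAC_DEP_PREV_IDX in emac_wait_deps() relies on: each EMAC waits
 * for the one listed before it.
 */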
3015 static int __init emac_init(void)
3017 int rc;
3019 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3021 /* Init debug stuff */
3022 emac_init_debug();
3024 /* Build EMAC boot list */
3025 emac_make_bootlist();
3027 /* Init submodules */
3028 rc = mal_init();
3029 if (rc)
3030 goto err;
3031 rc = zmii_init();
3032 if (rc)
3033 goto err_mal;
3034 rc = rgmii_init();
3035 if (rc)
3036 goto err_zmii;
3037 rc = tah_init();
3038 if (rc)
3039 goto err_rgmii;
3040 rc = platform_driver_register(&emac_driver);
3041 if (rc)
3042 goto err_tah;
3044 return 0;
3046 err_tah:
3047 tah_exit();
3048 err_rgmii:
3049 rgmii_exit();
3050 err_zmii:
3051 zmii_exit();
3052 err_mal:
3053 mal_exit();
3054 err:
3055 return rc;
3058 static void __exit emac_exit(void)
3060 int i;
3062 platform_driver_unregister(&emac_driver);
3064 tah_exit();
3065 rgmii_exit();
3066 zmii_exit();
3067 mal_exit();
3068 emac_fini_debug();
3070 /* Destroy EMAC boot list */
3071 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3072 if (emac_boot_list[i])
3073 of_node_put(emac_boot_list[i]);
3076 module_init(emac_init);
3077 module_exit(emac_exit);