/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
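
/* Editorial note (not in the original source): these values appear to be one
 * maximum-length frame time, rounded up. A 1518-byte frame plus 8 bytes of
 * preamble/SFD and a 12-byte inter-frame gap is 1538 bytes = 12304 bits:
 *   12304 bits / 10 Mb/s   ~= 1230 us  (STOP_TIMEOUT_10)
 *   12304 bits / 100 Mb/s  ~= 124 us   (STOP_TIMEOUT_100)
 *   12304 bits / 1000 Mb/s ~= 13 us    (STOP_TIMEOUT_1000)
 * and a ~9018-byte jumbo frame at 1000 Mb/s gives ~73 us
 * (STOP_TIMEOUT_1000_JUMBO).
 */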
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* Top 6 bits of the CRC select one of 64 hash bits, spread
		 * across the four 16-bit GAHT registers */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
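
/* Editorial note (not in the original source): both RWMR encodings above
 * take the low and high watermarks as FIFO-entry counts rather than bytes;
 * callers such as emac_configure() divide byte thresholds by
 * dev->fifo_entry_size before passing them in. The EMAC4 layout packs a
 * 10-bit high mark at bit 6, the older layout a 9-bit high mark at bit 7.
 */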
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size;
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n",
	    dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* Check for full duplex */
	if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default:		/* make gcc happy */
		break;
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);
	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);
	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						      EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_full_tx_reset(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}
static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.  --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
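
/* Editorial note (not in the original source): the "+ 2" offsets in the RX
 * allocation below look like the usual trick of shifting the frame so the IP
 * header lands on a word boundary after the 14-byte Ethernet header. The
 * buffer is mapped starting at skb->data - 2 with the descriptor pointer
 * advanced by 2, which also appears to be why the copy path in emac_poll_rx()
 * copies from skb->data - 2 with length len + 2.
 */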
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);

	/* XXX Start PHY polling now. Shouldn't we do like sungem instead and
	 * always poll the PHY even when the iface is down ? That would allow
	 * things like laptop-net to work. --BenH
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
/* BHs disabled */
#if 0
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);

	DBG2(dev, "link timer" NL);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_reinitialize(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);

	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0)
		cancel_rearming_delayed_work(&dev->link_work);

	emac_netif_stop(dev);
	flush_scheduled_work();

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
#ifdef CONFIG_IBM_NEW_EMAC_TAH
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
/* Tx lock BHs */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
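
/* Editorial note (not in the original source): a map of the receive loop
 * below. Single-descriptor packets take the fast path: bad descriptors are
 * recycled and counted at "next:", short frames under EMAC_RX_COPY_THRESH
 * are copied into a fresh skb, and everything else is handed to the stack
 * at "push_packet:". Multi-descriptor frames branch to "sg:", where the
 * first BD seeds dev->rx_sg_skb and subsequent BDs are appended by
 * emac_rx_sg_append() until MAL_RX_CTRL_LAST completes the packet.
 */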
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2052 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2053 u8 * buf)
2055 if (stringset == ETH_SS_STATS)
2056 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
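/* The u64 array handed back to ethtool below is dev->stats immediately
 * followed by dev->estats, in the same order as emac_stats_keys.
 */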
2059 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2060 struct ethtool_stats *estats,
2061 u64 * tmp_stats)
2063 struct emac_instance *dev = netdev_priv(ndev);
2065 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2066 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2067 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2070 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2071 struct ethtool_drvinfo *info)
2073 struct emac_instance *dev = netdev_priv(ndev);
2075 strcpy(info->driver, "ibm_emac");
2076 strcpy(info->version, DRV_VERSION);
2077 info->fw_version[0] = '\0';
2078 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2079 dev->cell_index, dev->ofdev->node->full_name);
2080 info->n_stats = emac_ethtool_get_stats_count(ndev);
2081 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2084 static const struct ethtool_ops emac_ethtool_ops = {
2085 .get_settings = emac_ethtool_get_settings,
2086 .set_settings = emac_ethtool_set_settings,
2087 .get_drvinfo = emac_ethtool_get_drvinfo,
2089 .get_regs_len = emac_ethtool_get_regs_len,
2090 .get_regs = emac_ethtool_get_regs,
2092 .nway_reset = emac_ethtool_nway_reset,
2094 .get_ringparam = emac_ethtool_get_ringparam,
2095 .get_pauseparam = emac_ethtool_get_pauseparam,
2097 .get_rx_csum = emac_ethtool_get_rx_csum,
2099 .get_strings = emac_ethtool_get_strings,
2100 .get_stats_count = emac_ethtool_get_stats_count,
2101 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2103 .get_link = ethtool_op_get_link,
2104 .get_tx_csum = ethtool_op_get_tx_csum,
2105 .get_sg = ethtool_op_get_sg,
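/*
 * Legacy MII ioctls. 'data' aliases ifr_ifru as an array of u16:
 * data[0] = PHY address, data[1] = register number, data[2] = value
 * to write, data[3] = value read back. The SIOCDEVPRIVATE variants
 * are presumably kept for userspace that predates SIOCGMIIPHY.
 */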
2108 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2110 struct emac_instance *dev = netdev_priv(ndev);
2111 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2113 DBG(dev, "ioctl %08x" NL, cmd);
2115 if (dev->phy.address < 0)
2116 return -EOPNOTSUPP;
2118 switch (cmd) {
2119 case SIOCGMIIPHY:
2120 case SIOCDEVPRIVATE:
2121 data[0] = dev->phy.address;
2122 /* Fall through */
2123 case SIOCGMIIREG:
2124 case SIOCDEVPRIVATE + 1:
2125 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2126 return 0;
2128 case SIOCSMIIREG:
2129 case SIOCDEVPRIVATE + 2:
2130 if (!capable(CAP_NET_ADMIN))
2131 return -EPERM;
2132 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2133 return 0;
2134 default:
2135 return -EOPNOTSUPP;
2139 struct emac_depentry {
2140 u32 phandle;
2141 struct device_node *node;
2142 struct of_device *ofdev;
2143 void *drvdata;
2146 #define EMAC_DEP_MAL_IDX 0
2147 #define EMAC_DEP_ZMII_IDX 1
2148 #define EMAC_DEP_RGMII_IDX 2
2149 #define EMAC_DEP_TAH_IDX 3
2150 #define EMAC_DEP_MDIO_IDX 4
2151 #define EMAC_DEP_PREV_IDX 5
2152 #define EMAC_DEP_COUNT 6
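/*
 * emac_check_deps() returns true only once every entry in the table has
 * been resolved all the way from phandle to device_node to of_device to
 * driver data (or had no phandle to begin with). It is safe to call
 * repeatedly: each call only fills in whatever steps are still missing.
 */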
2154 static int __devinit emac_check_deps(struct emac_instance *dev,
2155 struct emac_depentry *deps)
2157 int i, there = 0;
2158 struct device_node *np;
2160 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2161 /* no dependency on that item, all right */
2162 if (deps[i].phandle == 0) {
2163 there++;
2164 continue;
2166 /* special case for blist as the dependency might go away */
2167 if (i == EMAC_DEP_PREV_IDX) {
2168 np = *(dev->blist - 1);
2169 if (np == NULL) {
2170 deps[i].phandle = 0;
2171 there++;
2172 continue;
2174 if (deps[i].node == NULL)
2175 deps[i].node = of_node_get(np);
2177 if (deps[i].node == NULL)
2178 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2179 if (deps[i].node == NULL)
2180 continue;
2181 if (deps[i].ofdev == NULL)
2182 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2183 if (deps[i].ofdev == NULL)
2184 continue;
2185 if (deps[i].drvdata == NULL)
2186 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2187 if (deps[i].drvdata != NULL)
2188 there++;
2190 return (there == EMAC_DEP_COUNT);
2193 static void emac_put_deps(struct emac_instance *dev)
2195 if (dev->mal_dev)
2196 of_dev_put(dev->mal_dev);
2197 if (dev->zmii_dev)
2198 of_dev_put(dev->zmii_dev);
2199 if (dev->rgmii_dev)
2200 of_dev_put(dev->rgmii_dev);
2201 if (dev->mdio_dev)
2202 of_dev_put(dev->mdio_dev);
2203 if (dev->tah_dev)
2204 of_dev_put(dev->tah_dev);
2207 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2208 unsigned long action, void *data)
2210 /* We are only interested in devices getting bound to a driver */
2211 if (action == BUS_NOTIFY_BOUND_DRIVER)
2212 wake_up_all(&emac_probe_wait);
2213 return 0;
2216 static struct notifier_block emac_of_bus_notifier = {
2217 .notifier_call = emac_of_bus_notify
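/*
 * Wait for the devices we depend on (MAL, ZMII, RGMII, TAH, MDIO and the
 * previous EMAC on the boot list) to be probed. The bus notifier above
 * wakes emac_probe_wait whenever a driver binds to an of_platform device,
 * and we re-run emac_check_deps() until everything has resolved or
 * EMAC_PROBE_DEP_TIMEOUT expires.
 */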
2220 static int __devinit emac_wait_deps(struct emac_instance *dev)
2222 struct emac_depentry deps[EMAC_DEP_COUNT];
2223 int i, err;
2225 memset(&deps, 0, sizeof(deps));
2227 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2228 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2229 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2230 if (dev->tah_ph)
2231 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2232 if (dev->mdio_ph)
2233 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2234 if (dev->blist && dev->blist > emac_boot_list)
2235 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2236 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2237 wait_event_timeout(emac_probe_wait,
2238 emac_check_deps(dev, deps),
2239 EMAC_PROBE_DEP_TIMEOUT);
2240 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2241 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2242 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2243 if (deps[i].node)
2244 of_node_put(deps[i].node);
2245 if (err && deps[i].ofdev)
2246 of_dev_put(deps[i].ofdev);
2248 if (err == 0) {
2249 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2250 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2251 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2252 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2253 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2255 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2256 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2257 return err;
2260 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2261 u32 *val, int fatal)
2263 int len;
2264 const u32 *prop = of_get_property(np, name, &len);
2265 if (prop == NULL || len < sizeof(u32)) {
2266 if (fatal)
2267 printk(KERN_ERR "%s: missing %s property\n",
2268 np->full_name, name);
2269 return -ENODEV;
2271 *val = *prop;
2272 return 0;
2275 static int __devinit emac_init_phy(struct emac_instance *dev)
2277 struct device_node *np = dev->ofdev->node;
2278 struct net_device *ndev = dev->ndev;
2279 u32 phy_map, adv;
2280 int i;
2282 dev->phy.dev = ndev;
2283 dev->phy.mode = dev->phy_mode;
2285 /* PHY-less configuration.
2286 * XXX I probably should move these settings to the dev tree
2288 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2289 emac_reset(dev);
2294 dev->phy.address = -1;
2295 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2296 dev->phy.pause = 1;
2298 return 0;
2301 mutex_lock(&emac_phy_map_lock);
2302 phy_map = dev->phy_map | busy_phy_map;
2304 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2306 dev->phy.mdio_read = emac_mdio_read;
2307 dev->phy.mdio_write = emac_mdio_write;
2309 /* Configure EMAC with defaults so we can at least use MDIO
2310 * This is needed mostly for 440GX
2312 if (emac_phy_gpcs(dev->phy.mode)) {
2313 /* XXX
2314 * Make GPCS PHY address equal to EMAC index.
2315 * We probably should take into account busy_phy_map
2316 * and/or phy_map here.
2318 * Note that the busy_phy_map is currently global
2319 * while it should probably be per-ASIC...
2321 dev->phy.address = dev->cell_index;
2324 emac_configure(dev);
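/* Scan the MDIO bus for a PHY. A set bit in phy_map means "do not
 * probe this address": the map starts out as the device-tree phy_map
 * merged with the globally busy addresses, and if an explicit
 * phy-address was given we invert a one-bit mask instead so that only
 * that single address gets probed.
 */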
2326 if (dev->phy_address != 0xffffffff)
2327 phy_map = ~(1 << dev->phy_address);
2329 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2330 if (!(phy_map & 1)) {
2331 int r;
2332 busy_phy_map |= 1 << i;
2334 /* Quick check if there is a PHY at the address */
2335 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2336 if (r == 0xffff || r < 0)
2337 continue;
2338 if (!emac_mii_phy_probe(&dev->phy, i))
2339 break;
2341 mutex_unlock(&emac_phy_map_lock);
2342 if (i == 0x20) {
2343 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2344 return -ENXIO;
2347 /* Init PHY */
2348 if (dev->phy.def->ops->init)
2349 dev->phy.def->ops->init(&dev->phy);
2351 /* Disable any PHY features not supported by the platform */
2352 dev->phy.def->features &= ~dev->phy_feat_exc;
2354 /* Setup initial link parameters */
2355 if (dev->phy.features & SUPPORTED_Autoneg) {
2356 adv = dev->phy.features;
2357 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2358 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2359 /* Restart autonegotiation */
2360 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2361 } else {
2362 u32 f = dev->phy.def->features;
2363 int speed = SPEED_10, fd = DUPLEX_HALF;
2365 /* Select highest supported speed/duplex */
2366 if (f & SUPPORTED_1000baseT_Full) {
2367 speed = SPEED_1000;
2368 fd = DUPLEX_FULL;
2369 } else if (f & SUPPORTED_1000baseT_Half)
2370 speed = SPEED_1000;
2371 else if (f & SUPPORTED_100baseT_Full) {
2372 speed = SPEED_100;
2373 fd = DUPLEX_FULL;
2374 } else if (f & SUPPORTED_100baseT_Half)
2375 speed = SPEED_100;
2376 else if (f & SUPPORTED_10baseT_Full)
2377 fd = DUPLEX_FULL;
2379 /* Force link parameters */
2380 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2382 return 0;
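/*
 * emac_init_config() below pulls everything from the EMAC device-tree
 * node. An illustrative fragment (node names and values are made up,
 * not taken from a real board; "clock-frequency" is read from the
 * parent bus node):
 *
 *	EMAC0: ethernet@ef600800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4", "ibm,emac";
 *		reg = <ef600800 70>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		cell-index = <0>;
 *		max-frame-size = <5dc>;
 *		rx-fifo-size = <1000>;
 *		tx-fifo-size = <800>;
 *		phy-mode = "rgmii";
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */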
2385 static int __devinit emac_init_config(struct emac_instance *dev)
2387 struct device_node *np = dev->ofdev->node;
2388 const void *p;
2389 unsigned int plen;
2390 const char *pm, *phy_modes[] = {
2391 [PHY_MODE_NA] = "",
2392 [PHY_MODE_MII] = "mii",
2393 [PHY_MODE_RMII] = "rmii",
2394 [PHY_MODE_SMII] = "smii",
2395 [PHY_MODE_RGMII] = "rgmii",
2396 [PHY_MODE_TBI] = "tbi",
2397 [PHY_MODE_GMII] = "gmii",
2398 [PHY_MODE_RTBI] = "rtbi",
2399 [PHY_MODE_SGMII] = "sgmii",
2402 /* Read config from device-tree */
2403 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2404 return -ENXIO;
2405 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2406 return -ENXIO;
2407 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2408 return -ENXIO;
2409 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2410 return -ENXIO;
2411 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2412 dev->max_mtu = 1500;
2413 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2414 dev->rx_fifo_size = 2048;
2415 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2416 dev->tx_fifo_size = 2048;
2417 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2418 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2419 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2420 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2421 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2422 dev->phy_address = 0xffffffff;
2423 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2424 dev->phy_map = 0xffffffff;
2425 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2426 return -ENXIO;
2427 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2428 dev->tah_ph = 0;
2429 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2430 dev->tah_port = 0;
2431 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2432 dev->mdio_ph = 0;
2433 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2434 dev->zmii_ph = 0;
2435 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2436 dev->zmii_port = 0xffffffff;
2437 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2438 dev->rgmii_ph = 0;
2439 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2440 dev->rgmii_port = 0xffffffff;
2441 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2442 dev->fifo_entry_size = 16;
2443 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2444 dev->mal_burst_size = 256;
2446 /* PHY mode needs some decoding */
2447 dev->phy_mode = PHY_MODE_NA;
2448 pm = of_get_property(np, "phy-mode", &plen);
2449 if (pm != NULL) {
2450 int i;
2451 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2452 if (!strcasecmp(pm, phy_modes[i])) {
2453 dev->phy_mode = i;
2454 break;
2458 /* Backward compat with non-final DT */
2459 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2460 u32 nmode = *(const u32 *)pm;
2461 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2462 dev->phy_mode = nmode;
2465 /* Check EMAC version */
2466 if (of_device_is_compatible(np, "ibm,emac4"))
2467 dev->features |= EMAC_FTR_EMAC4;
2468 if (of_device_is_compatible(np, "ibm,emac-axon")
2469 || of_device_is_compatible(np, "ibm,emac-440epx"))
2470 dev->features |= EMAC_FTR_HAS_AXON_STACR
2471 | EMAC_FTR_STACR_OC_INVERT;
2472 if (of_device_is_compatible(np, "ibm,emac-440spe"))
2473 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2475 /* Fixup some feature bits based on the device tree and verify
2476 * we have support for them compiled in
2478 if (dev->tah_ph != 0) {
2479 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2480 dev->features |= EMAC_FTR_HAS_TAH;
2481 #else
2482 printk(KERN_ERR "%s: TAH support not enabled!\n",
2483 np->full_name);
2484 return -ENXIO;
2485 #endif
2488 if (dev->zmii_ph != 0) {
2489 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2490 dev->features |= EMAC_FTR_HAS_ZMII;
2491 #else
2492 printk(KERN_ERR "%s: ZMII support not enabled!\n",
2493 np->full_name);
2494 return -ENXIO;
2495 #endif
2498 if (dev->rgmii_ph != 0) {
2499 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2500 dev->features |= EMAC_FTR_HAS_RGMII;
2501 #else
2502 printk(KERN_ERR "%s: RGMII support not enabled!\n",
2503 np->full_name);
2504 return -ENXIO;
2505 #endif
2508 /* Read MAC-address */
2509 p = of_get_property(np, "local-mac-address", NULL);
2510 if (p == NULL) {
2511 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2512 np->full_name);
2513 return -ENXIO;
2515 memcpy(dev->ndev->dev_addr, p, 6);
2517 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2518 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2519 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2520 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2521 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2523 return 0;
2526 static int __devinit emac_probe(struct of_device *ofdev,
2527 const struct of_device_id *match)
2529 struct net_device *ndev;
2530 struct emac_instance *dev;
2531 struct device_node *np = ofdev->node;
2532 struct device_node **blist = NULL;
2533 int err, i;
2535 /* Find ourselves in the bootlist if we are there */
2536 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2537 if (emac_boot_list[i] == np)
2538 blist = &emac_boot_list[i];
2540 /* Allocate our net_device structure */
2541 err = -ENOMEM;
2542 ndev = alloc_etherdev(sizeof(struct emac_instance));
2543 if (!ndev) {
2544 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2545 np->full_name);
2546 goto err_gone;
2548 dev = netdev_priv(ndev);
2549 dev->ndev = ndev;
2550 dev->ofdev = ofdev;
2551 dev->blist = blist;
2552 SET_NETDEV_DEV(ndev, &ofdev->dev);
2554 /* Initialize some embedded data structures */
2555 mutex_init(&dev->mdio_lock);
2556 mutex_init(&dev->link_lock);
2557 spin_lock_init(&dev->lock);
2558 INIT_WORK(&dev->reset_work, emac_reset_work);
2560 /* Init various config data based on device-tree */
2561 err = emac_init_config(dev);
2562 if (err != 0)
2563 goto err_free;
2565 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2566 dev->emac_irq = irq_of_parse_and_map(np, 0);
2567 dev->wol_irq = irq_of_parse_and_map(np, 1);
2568 if (dev->emac_irq == NO_IRQ) {
2569 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
err = -ENODEV;
2570 goto err_free;
2572 ndev->irq = dev->emac_irq;
2574 /* Map EMAC regs */
2575 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2576 printk(KERN_ERR "%s: Can't get registers address\n",
2577 np->full_name);
err = -ENXIO;
2578 goto err_irq_unmap;
2580 /* TODO: request_mem_region */
2581 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2582 if (dev->emacp == NULL) {
2583 printk(KERN_ERR "%s: Can't map device registers!\n",
2584 np->full_name);
2585 err = -ENOMEM;
2586 goto err_irq_unmap;
2589 /* Wait for dependent devices */
2590 err = emac_wait_deps(dev);
2591 if (err) {
2592 printk(KERN_ERR
2593 "%s: Timeout waiting for dependent devices\n",
2594 np->full_name);
2595 /* display more info about what's missing ? */
2596 goto err_reg_unmap;
2598 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2599 if (dev->mdio_dev != NULL)
2600 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2602 /* Register with MAL */
2603 dev->commac.ops = &emac_commac_ops;
2604 dev->commac.dev = dev;
2605 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2606 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2607 err = mal_register_commac(dev->mal, &dev->commac);
2608 if (err) {
2609 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2610 np->full_name, dev->mal_dev->node->full_name);
2611 goto err_rel_deps;
2613 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2614 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2616 /* Get pointers to BD rings */
2617 dev->tx_desc =
2618 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2619 dev->rx_desc =
2620 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
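/* Both rings live inside the MAL's shared descriptor memory
 * (dev->mal->bd_virt); the per-channel offsets are computed by the
 * MAL driver, so we only keep plain pointers into that block here.
 */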
2622 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2623 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2625 /* Clean rings */
2626 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2627 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2629 /* Attach to ZMII, if needed */
2630 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2631 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2632 goto err_unreg_commac;
2634 /* Attach to RGMII, if needed */
2635 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2636 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2637 goto err_detach_zmii;
2639 /* Attach to TAH, if needed */
2640 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2641 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2642 goto err_detach_rgmii;
2644 /* Set some link defaults before we can find out real parameters */
2645 dev->phy.speed = SPEED_100;
2646 dev->phy.duplex = DUPLEX_FULL;
2647 dev->phy.autoneg = AUTONEG_DISABLE;
2648 dev->phy.pause = dev->phy.asym_pause = 0;
2649 dev->stop_timeout = STOP_TIMEOUT_100;
2650 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2652 /* Find PHY if any */
2653 err = emac_init_phy(dev);
2654 if (err != 0)
2655 goto err_detach_tah;
2657 /* Fill in the driver function table */
2658 ndev->open = &emac_open;
2659 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2660 if (dev->tah_dev) {
2661 ndev->hard_start_xmit = &emac_start_xmit_sg;
2662 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2663 } else
2664 #endif
2665 ndev->hard_start_xmit = &emac_start_xmit;
2666 ndev->tx_timeout = &emac_tx_timeout;
2667 ndev->watchdog_timeo = 5 * HZ;
2668 ndev->stop = &emac_close;
2669 ndev->get_stats = &emac_stats;
2670 ndev->set_multicast_list = &emac_set_multicast_list;
2671 ndev->do_ioctl = &emac_ioctl;
2672 if (emac_phy_supports_gige(dev->phy_mode)) {
2673 ndev->change_mtu = &emac_change_mtu;
2674 dev->commac.ops = &emac_commac_sg_ops;
2676 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2678 netif_carrier_off(ndev);
2679 netif_stop_queue(ndev);
2681 err = register_netdev(ndev);
2682 if (err) {
2683 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2684 np->full_name, err);
2685 goto err_detach_tah;
2688 /* Set our drvdata last as we don't want it visible until we are
2689 * fully initialized
2691 wmb();
2692 dev_set_drvdata(&ofdev->dev, dev);
2694 /* There's a new kid in town! Let's tell everybody */
2695 wake_up_all(&emac_probe_wait);
2698 printk(KERN_INFO
2699 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2700 ndev->name, dev->cell_index, np->full_name,
2701 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2702 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2704 if (dev->phy.address >= 0)
2705 printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
2706 dev->phy.def->name, dev->phy.address);
2708 emac_dbg_register(dev);
2710 /* Life is good */
2711 return 0;
2713 /* I have a bad feeling about this ... */
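/* Unwind in reverse order of acquisition: each label below releases
 * one resource and falls through to the next, so a goto to any label
 * cleans up everything acquired before the corresponding failure.
 */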
2715 err_detach_tah:
2716 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2717 tah_detach(dev->tah_dev, dev->tah_port);
2718 err_detach_rgmii:
2719 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2720 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2721 err_detach_zmii:
2722 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2723 zmii_detach(dev->zmii_dev, dev->zmii_port);
2724 err_unreg_commac:
2725 mal_unregister_commac(dev->mal, &dev->commac);
2726 err_rel_deps:
2727 emac_put_deps(dev);
2728 err_reg_unmap:
2729 iounmap(dev->emacp);
2730 err_irq_unmap:
2731 if (dev->wol_irq != NO_IRQ)
2732 irq_dispose_mapping(dev->wol_irq);
2733 if (dev->emac_irq != NO_IRQ)
2734 irq_dispose_mapping(dev->emac_irq);
2735 err_free:
2736 free_netdev(ndev);
2737 err_gone:
2738 /* If we were on the bootlist, remove ourselves since we won't show
2739 * up, and wake all waiters to notify them in case they were waiting
2740 * on us
2742 if (blist) {
2743 *blist = NULL;
2744 wake_up_all(&emac_probe_wait);
2746 return err;
2749 static int __devexit emac_remove(struct of_device *ofdev)
2751 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2753 DBG(dev, "remove" NL);
2755 dev_set_drvdata(&ofdev->dev, NULL);
2757 unregister_netdev(dev->ndev);
2759 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2760 tah_detach(dev->tah_dev, dev->tah_port);
2761 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2762 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2763 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2764 zmii_detach(dev->zmii_dev, dev->zmii_port);
2766 mal_unregister_commac(dev->mal, &dev->commac);
2767 emac_put_deps(dev);
2769 emac_dbg_unregister(dev);
2770 iounmap(dev->emacp);
2772 if (dev->wol_irq != NO_IRQ)
2773 irq_dispose_mapping(dev->wol_irq);
2774 if (dev->emac_irq != NO_IRQ)
2775 irq_dispose_mapping(dev->emac_irq);
2777 free_netdev(dev->ndev);
2779 return 0;
2782 /* XXX Features in here should be replaced by properties... */
2783 static struct of_device_id emac_match[] =
2786 .type = "network",
2787 .compatible = "ibm,emac",
2790 .type = "network",
2791 .compatible = "ibm,emac4",
2796 static struct of_platform_driver emac_driver = {
2797 .name = "emac",
2798 .match_table = emac_match,
2800 .probe = emac_probe,
2801 .remove = emac_remove,
2804 static void __init emac_make_bootlist(void)
2806 struct device_node *np = NULL;
2807 int j, max, i = 0, k;
2808 int cell_indices[EMAC_BOOT_LIST_SIZE];
2810 /* Collect EMACs */
2811 while ((np = of_find_all_nodes(np)) != NULL) {
2812 const u32 *idx;
2814 if (of_match_node(emac_match, np) == NULL)
2815 continue;
2816 if (of_get_property(np, "unused", NULL))
2817 continue;
2818 idx = of_get_property(np, "cell-index", NULL);
2819 if (idx == NULL)
2820 continue;
2821 cell_indices[i] = *idx;
2822 emac_boot_list[i++] = of_node_get(np);
2823 if (i >= EMAC_BOOT_LIST_SIZE) {
2824 of_node_put(np);
2825 break;
2828 max = i;
2830 /* Sort them by cell index (simple exchange sort, the list is tiny) */
2831 for (i = 0; max > 1 && (i < (max - 1)); i++)
2832 for (j = i; j < max; j++) {
2833 if (cell_indices[i] > cell_indices[j]) {
2834 np = emac_boot_list[i];
2835 emac_boot_list[i] = emac_boot_list[j];
2836 emac_boot_list[j] = np;
2837 k = cell_indices[i];
2838 cell_indices[i] = cell_indices[j];
2839 cell_indices[j] = k;
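/*
 * The resulting list, sorted by cell index, is what makes probing
 * deterministic: through EMAC_DEP_PREV_IDX in emac_wait_deps(), every
 * EMAC waits for the previous boot-list entry to finish probing (or to
 * be removed after a failed probe) before completing its own probe.
 */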
2844 static int __init emac_init(void)
2846 int rc;
2848 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2850 /* Init debug stuff */
2851 emac_init_debug();
2853 /* Build EMAC boot list */
2854 emac_make_bootlist();
2856 /* Init submodules */
2857 rc = mal_init();
2858 if (rc)
2859 goto err;
2860 rc = zmii_init();
2861 if (rc)
2862 goto err_mal;
2863 rc = rgmii_init();
2864 if (rc)
2865 goto err_zmii;
2866 rc = tah_init();
2867 if (rc)
2868 goto err_rgmii;
2869 rc = of_register_platform_driver(&emac_driver);
2870 if (rc)
2871 goto err_tah;
2873 return 0;
2875 err_tah:
2876 tah_exit();
2877 err_rgmii:
2878 rgmii_exit();
2879 err_zmii:
2880 zmii_exit();
2881 err_mal:
2882 mal_exit();
2883 err:
2884 return rc;
2887 static void __exit emac_exit(void)
2889 int i;
2891 of_unregister_platform_driver(&emac_driver);
2893 tah_exit();
2894 rgmii_exit();
2895 zmii_exit();
2896 mal_exit();
2897 emac_fini_debug();
2899 /* Destroy EMAC boot list */
2900 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2901 if (emac_boot_list[i])
2902 of_node_put(emac_boot_list[i]);
2905 module_init(emac_init);
2906 module_exit(emac_exit);