Linux 2.6.17.7
[linux/fpc-iii.git] / drivers / net / ibm_emac / ibm_emac_core.c
blob7e49522b8b3c6a10cc5c75070edb378d1c935658
1 /*
2 * drivers/net/ibm_emac/ibm_emac_core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
22 #include <linux/config.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/string.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
41 #include <asm/processor.h>
42 #include <asm/io.h>
43 #include <asm/dma.h>
44 #include <asm/uaccess.h>
45 #include <asm/ocp.h>
47 #include "ibm_emac_core.h"
48 #include "ibm_emac_debug.h"
51 * Lack of dma_unmap_???? calls is intentional.
53 * API-correct usage requires additional support state information to be
54 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55 * EMAC design (e.g. TX buffer passed from network stack can be split into
56 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57 * maintaining such information will add additional overhead.
58 * Current DMA API implementation for 4xx processors only ensures cache coherency
59 * and dma_unmap_???? routines are empty and are likely to stay this way.
60 * I decided to omit dma_unmap_??? calls because I don't want to add additional
61 * complexity just for the sake of following some abstract API, when it doesn't
62 * add any real benefit to the driver. I understand that this decision maybe
63 * controversial, but I really tried to make code API-correct and efficient
64 * at the same time and didn't come up with code I liked :(. --ebs
/* Driver identification */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
/* Bitmap of PHY addresses already claimed by some EMAC instance */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
/* Route EMAC 'idx' RX clock from the TX clock (workaround active).
 * IRQs are disabled around the read-modify-write of the control register.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	/* CPC0_EPCTL is DCR 0xf3; one enable bit per EMAC */
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

/* Restore the normal (external PHY) RX clock source for EMAC 'idx' */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
/* No clock workaround configured: compile to no-ops */
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
/* Not a 440GX with the workaround enabled: compile to no-ops */
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
143 /* I don't want to litter system log with timeout errors
144 * when we have brain-damaged PHY.
/* Report a PHY/channel timeout.  When the RX clock workaround is active,
 * timeouts are expected and go to the debug log only; otherwise the
 * message is rate-limited to keep a broken PHY from flooding the log.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals (link timer period, in jiffies) */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
/* i.e. roughly the time to drain one maximum-length frame at each speed */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
/* ethtool statistics names; order must match the stats structs exactly.
 * NOTE(review): "tx_bd_multple_collisions" is misspelled but is exposed
 * to userspace via ethtool, so it is preserved as-is.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

/* Forward declarations */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);
191 static inline int emac_phy_supports_gige(int phy_mode)
193 return phy_mode == PHY_MODE_GMII ||
194 phy_mode == PHY_MODE_RGMII ||
195 phy_mode == PHY_MODE_TBI ||
196 phy_mode == PHY_MODE_RTBI;
199 static inline int emac_phy_gpcs(int phy_mode)
201 return phy_mode == PHY_MODE_TBI ||
202 phy_mode == PHY_MODE_RTBI;
/* Enable the transmitter (set MR0[TXE]).  IRQs are disabled around the
 * read-modify-write of MR0 since it is shared with the RX enable paths.
 */
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
/* Disable the transmitter and busy-wait (up to dev->stop_timeout us)
 * for the channel to report idle via MR0[TXI]; logs on timeout.
 */
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* MR0[TXI] is set by hardware once TX is quiesced */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
245 static void emac_rx_enable(struct ocp_enet_private *dev)
247 struct emac_regs __iomem *p = dev->emacp;
248 unsigned long flags;
249 u32 r;
251 local_irq_save(flags);
252 if (unlikely(dev->commac.rx_stopped))
253 goto out;
255 DBG("%d: rx_enable" NL, dev->def->index);
257 r = in_be32(&p->mr0);
258 if (!(r & EMAC_MR0_RXE)) {
259 if (unlikely(!(r & EMAC_MR0_RXI))) {
260 /* Wait if previous async disable is still in progress */
261 int n = dev->stop_timeout;
262 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
263 udelay(1);
264 --n;
266 if (unlikely(!n))
267 emac_report_timeout_error(dev,
268 "RX disable timeout");
270 out_be32(&p->mr0, r | EMAC_MR0_RXE);
272 out:
273 local_irq_restore(flags);
/* Disable the receiver and busy-wait (up to dev->stop_timeout us) for
 * the channel to report idle via MR0[RXI]; logs on timeout.
 */
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
/* Clear MR0[RXE] without waiting for the channel to go idle.  A later
 * emac_rx_enable() will wait for MR0[RXI] before re-enabling.
 */
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
/* Soft-reset the EMAC core (MR0[SRST]).  Returns 0 on success or
 * -ETIMEDOUT if the reset bit never self-clears; the failure is latched
 * in dev->reset_failed so the next attempt skips the channel-stop step
 * (which itself needs a functional core).
 */
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	/* SRST self-clears when the reset completes */
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
/* Program the four 16-bit group address hash tables (GAHT1..4) from the
 * device multicast list: a 64-bin hash indexed by the top 6 bits of the
 * Ethernet CRC of each address.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* bin index = 63 - top 6 CRC bits; set the matching bit */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
373 static inline u32 emac_iff2rmr(struct net_device *ndev)
375 u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
376 EMAC_RMR_BASE;
378 if (ndev->flags & IFF_PROMISC)
379 r |= EMAC_RMR_PME;
380 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
381 r |= EMAC_RMR_PMME;
382 else if (ndev->mc_count > 0)
383 r |= EMAC_RMR_MAE;
385 return r;
388 static inline int emac_opb_mhz(void)
390 return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
/* BHs disabled */
/* Fully (re)program the EMAC from current PHY state: reset the core,
 * set mode (MR1), MAC address, VLAN TPID, RX mode, FIFO thresholds,
 * flow-control watermarks and IRQ mask.  Returns 0 or -ETIMEDOUT if the
 * core reset fails.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	/* stop_timeout is also chosen here, per negotiated speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	/* Propagate speed to the bridge (RGMII or ZMII) feeding this EMAC */
	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}
/* BHs disabled */
/* Reprogram the MAC and restart both channels; if emac_configure()
 * fails (core reset timeout) the channels are left disabled.
 */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}
/* BHs disabled */
/* Full TX path reset: stop the MAC transmitter and its MAL channel,
 * drop every queued skb, reconfigure the MAC, then restart both
 * directions and wake the netdev queue.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
/* Read PHY register 'reg' of PHY 'id' through the EMAC STA (MDIO)
 * interface.  Returns the 16-bit value, -ETIMEDOUT if the management
 * interface stays busy, or -EREMOTEIO if the PHY signals an error.
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
/* Write 'val' to PHY register 'reg' of PHY 'id' through the EMAC STA
 * (MDIO) interface.  Timeouts are logged but not reported to the caller
 * (the write is best-effort).
 */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
/* mii_if_info read callback.  Dispatches through dev->mdio_dev when this
 * EMAC's PHY hangs off another instance's MDIO lines; BHs are disabled
 * to serialize against the softirq users of the STA interface.
 */
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}
/* mii_if_info write callback; see emac_mdio_read() for the dispatch
 * and locking rationale.
 */
static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
/* BHs disabled */
/* net_device set_multicast_list hook: update the RX mode register and
 * multicast hash without a full MAC reset.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* BHs disabled */
/* Resize the RX ring for a new MTU.  Stops RX, drops any partially
 * received SG packet, re-arms every BD, reallocates skbs only when
 * larger buffers are needed, toggles the MR1 jumbo bit via a full TX
 * reset when crossing ETH_DATA_LEN, and restarts RX.
 * Returns 0 or -ENOMEM (ring left consistent either way).
 */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* The +2/-2 dance mirrors emac_alloc_rx_skb(); presumably
		 * it keeps the IP header word-aligned — TODO confirm */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
/* net_device change_mtu hook: validate the requested MTU, resize the
 * RX ring only when the required skb size actually changes, and update
 * the cached skb/sync sizes on success.
 */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitalize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}
/* Free every skb queued in the TX ring and clear the descriptors;
 * BDs still marked READY (never transmitted) are counted as dropped.
 */
static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
/* Release every RX ring skb and any partially assembled SG packet,
 * clearing the descriptors as we go.
 */
static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
/* Allocate and DMA-map one RX skb into ring slot 'slot'.
 * Returns 0 or -ENOMEM.  The 2-byte offset on both the reserve and the
 * mapped address presumably keeps the IP header word-aligned after the
 * 14-byte Ethernet header — TODO confirm against EMAC_RX_SKB_HEADROOM.
 */
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before the BD is marked EMPTY */
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
848 static void emac_print_link_status(struct ocp_enet_private *dev)
850 if (netif_carrier_ok(dev->ndev))
851 printk(KERN_INFO "%s: link is up, %d %s%s\n",
852 dev->ndev->name, dev->phy.speed,
853 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
854 dev->phy.pause ? ", pause enabled" :
855 dev->phy.asym_pause ? ", assymetric pause enabled" : "");
856 else
857 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
/* Process ctx, rtnl_lock semaphore */
/* net_device open hook: grab the error IRQ, populate the RX ring, set
 * initial carrier state from the PHY (or force it on for PHY-less
 * setups), then configure the MAC and start MAL channels and the queue.
 * Returns 0, the request_irq() error, or -ENOMEM.
 */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		/* Real PHY: seed carrier state and kick off link polling */
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
/* BHs disabled */
/* Compare the link parameters currently programmed into MR1 (speed,
 * duplex, pause mode) with what the PHY reports; nonzero means the MAC
 * needs reconfiguring.
 */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	/* Decode pause configuration from the EIFC/APP bit pair */
	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
/* BHs disabled */
/* Periodic link poll.  On link-up transitions: re-read PHY parameters
 * and do a full TX reset if the MAC config no longer matches (always
 * for TAH-equipped MACs).  On link-down: switch the RX clock workaround
 * on and retry any previously failed reset.  Re-arms itself with a
 * faster period while the link is down.
 */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
/* BHs disabled */
/* Force a prompt link re-evaluation: drop carrier and, if the link
 * timer is armed, pull it forward to the short (link-off) interval.
 */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
/* Process ctx, rtnl_lock semaphore */
/* net_device stop hook: cancel link polling, quiesce both directions
 * (MAC and MAL channels), detach from MAL polling, then free the rings
 * and the error IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
/* Return the TX descriptor control bits requesting TAH hardware
 * checksumming when the stack asked for it; 0 otherwise (or when the
 * driver is built without TAH support).
 */
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_HW) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}
/* Common tail of the xmit paths: kick the transmitter, stop the netdev
 * queue when the ring just became full, and update TX statistics.
 * Always returns 0 (packet accepted).
 */
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* BHs disabled */
/* hard_start_xmit for non-SG configurations: place the whole skb into a
 * single TX BD and hand it to the hardware.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* BD fields must be visible before setting the READY control word */
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1096 #if defined(CONFIG_IBM_EMAC_TAH)
/* Fill consecutive TX BDs (starting after 'slot') with chunks of the
 * DMA region [pd, pd+len), each at most MAL_MAX_TX_SIZE bytes.  The
 * LAST bit is set on the final chunk only when 'last' is nonzero.
 * Returns the index of the last BD used.  Caller accounts for the
 * original 'slot' BD and for making the first BD READY.
 */
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached to intermediate chunks; the caller hangs
		 * the skb on the final BD of the frame */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
/* BHs disabled (SG version for TAH equipped EMACs) */
/*
 * Scatter-gather transmit path.  The linear part and each page fragment
 * are mapped separately and split into MAL_MAX_TX_SIZE chunks via
 * emac_xmit_split().  The first BD's ctrl word is written LAST (after
 * barrier()) so the hardware only sees a fully built chain.
 * On mid-frame slot exhaustion all BDs claimed so far are rolled back
 * (undo_frame) and the queue is stopped.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;	/* linear part only from here on */

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* publish the head BD only after the whole chain is in place */
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
/* BHs disabled */
/*
 * Decode the status bits of a completed-with-error TX descriptor and
 * bump the matching per-error counters in dev->estats.  Bits are not
 * mutually exclusive, so several counters may be incremented for one BD.
 */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;

	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
/*
 * Reclaim completed TX descriptors (MAL poll callback).
 *
 * Walks the ring from ack_slot, freeing skbs whose BDs the hardware has
 * released (MAL_TX_CTRL_READY cleared), accounting BD errors, and waking
 * the netif queue once enough slots are free.  Called with BHs disabled.
 */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;

	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* SG frames leave NULL in intermediate slots; only
			 * the last slot of a frame carries the skb */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
/*
 * Return an RX slot's existing skb to the hardware.
 *
 * If 'len' is non-zero the buffer was touched by the CPU, so it is
 * re-mapped for device DMA (the -2/+2 offsets keep the IP header
 * alignment scheme used by the RX path).  The ctrl write that marks the
 * BD EMPTY is ordered after data_len by barrier().
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
/*
 * Decode the status bits of a bad RX descriptor and bump the matching
 * per-error counters.  Multiple bits may be set for a single BD.
 */
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;

	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
/*
 * Mark a received skb as checksum-verified when a TAH is present and the
 * BD reported no errors (ctrl == 0 after masking by the caller).
 * No-op on non-TAH builds.
 */
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
/*
 * Append the data in RX 'slot' to the in-progress multi-BD packet
 * (dev->rx_sg_skb).  Returns 0 on success; -1 if there is no packet in
 * progress or the accumulated length would exceed rx_skb_size (the
 * partial packet is then dropped and counted as rx_dropped_mtu).
 * The slot's skb is recycled back to the hardware in all cases.
 */
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* BHs disabled */
/*
 * RX poll loop (MAL poll callback).  Processes up to 'budget' buffer
 * descriptors starting at dev->rx_slot and returns the number consumed.
 *
 * Single-BD frames take the main path: small frames (< EMAC_RX_COPY_THRESH)
 * are copied into a fresh skb and the ring skb recycled; larger frames
 * are detached and a replacement skb allocated.  Multi-BD (SG) frames are
 * assembled in dev->rx_sg_skb via the 'sg:' path.  If the channel had
 * been stopped (rxde), the tail of the function drains any late BDs,
 * drops a half-assembled packet, and restarts the MAL RX channel.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* read data_len only after seeing ctrl non-EMPTY */
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			/* -2/+2 preserves the 2-byte alignment pad in front
			 * of the frame */
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			/* more BDs arrived while we were processing; drain
			 * them before restarting the channel */
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1483 /* BHs disabled */
1484 static int emac_peek_rx(void *param)
1486 struct ocp_enet_private *dev = param;
1487 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1490 /* BHs disabled */
1491 static int emac_peek_rx_sg(void *param)
1493 struct ocp_enet_private *dev = param;
1494 int slot = dev->rx_slot;
1495 while (1) {
1496 u16 ctrl = dev->rx_desc[slot].ctrl;
1497 if (ctrl & MAL_RX_CTRL_EMPTY)
1498 return 0;
1499 else if (ctrl & MAL_RX_CTRL_LAST)
1500 return 1;
1502 slot = (slot + 1) % NUM_RX_BUFF;
1504 /* I'm just being paranoid here :) */
1505 if (unlikely(slot == dev->rx_slot))
1506 return 0;
1510 /* Hard IRQ */
1511 static void emac_rxde(void *param)
1513 struct ocp_enet_private *dev = param;
1514 ++dev->estats.rx_stopped;
1515 emac_rx_disable_async(dev);
/* Hard IRQ */
/*
 * EMAC error interrupt handler: read-and-acknowledge the interrupt
 * status register, then translate each status bit into its error
 * counter.  No data-path work happens here; TX/RX are MAL-driven.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	/* writing the read value back clears the asserted bits */
	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
/*
 * get_stats hook: fold the driver's detailed 64-bit counters
 * (dev->stats / dev->estats) into the legacy net_device_stats layout.
 * IRQs are disabled so the snapshot is not torn by the interrupt and
 * poll paths updating counters concurrently.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
1616 static void emac_remove(struct ocp_device *ocpdev)
1618 struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1620 DBG("%d: remove" NL, dev->def->index);
1622 ocp_set_drvdata(ocpdev, NULL);
1623 unregister_netdev(dev->ndev);
1625 tah_fini(dev->tah_dev);
1626 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1627 zmii_fini(dev->zmii_dev, dev->zmii_input);
1629 emac_dbg_register(dev->def->index, NULL);
1631 mal_unregister_commac(dev->mal, &dev->commac);
1632 iounmap(dev->emacp);
1633 kfree(dev->ndev);
/* MAL callback table for non-SG RX (single-BD frames only) */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* Same callbacks, but with the SG-aware peek_rx (installed when the
 * PHY mode supports gigabit, see emac_probe) */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
/*
 * ethtool get_settings: report PHY capabilities and current link
 * parameters.  The mutable link state is read under local_bh_disable()
 * because the link timer (BH context) updates it.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* address < 0 means the PHY-less/internal configuration */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}
/*
 * ethtool set_settings: validate the requested speed/duplex/autoneg
 * against the PHY's feature mask, then either force the link or restart
 * autonegotiation.  Pause advertisement bits are preserved across an
 * aneg change.  PHY state is modified under local_bh_disable().
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* forced mode: requested speed/duplex must be supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		/* keep the existing pause advertisement bits */
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}
1742 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1743 struct ethtool_ringparam *rp)
1745 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1746 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/*
 * ethtool get_pauseparam: derive flow-control status from the PHY's
 * negotiated state.  Read under local_bh_disable() since the link timer
 * updates PHY state from BH context.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	/* pause is only meaningful on a full-duplex link */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}
1768 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1770 struct ocp_enet_private *dev = ndev->priv;
1771 return dev->tah_dev != 0;
1774 static int emac_get_regs_len(struct ocp_enet_private *dev)
1776 return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1779 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1781 struct ocp_enet_private *dev = ndev->priv;
1782 return sizeof(struct emac_ethtool_regs_hdr) +
1783 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1784 zmii_get_regs_len(dev->zmii_dev) +
1785 rgmii_get_regs_len(dev->rgmii_dev) +
1786 tah_get_regs_len(dev->tah_dev);
/*
 * Write this EMAC's sub-header and raw register image into 'buf';
 * returns the position just past the written data so dumps can be
 * chained by the caller.
 */
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
/*
 * ethtool get_regs: concatenate MAL, EMAC and optional ZMII/RGMII/TAH
 * register dumps into 'buf', recording which components are present in
 * the header.  IRQs are disabled so the registers are sampled while the
 * data path cannot change them mid-dump.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
/*
 * ethtool nway_reset: restart autonegotiation.  Fails with -EOPNOTSUPP
 * for the PHY-less configuration and -EINVAL when autoneg is disabled.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

      out:
	local_bh_enable();
	return res;
}
1850 static int emac_ethtool_get_stats_count(struct net_device *ndev)
1852 return EMAC_ETHTOOL_STATS_COUNT;
1855 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1856 u8 * buf)
1858 if (stringset == ETH_SS_STATS)
1859 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/*
 * ethtool get_ethtool_stats: copy dev->stats followed by dev->estats
 * into the output array (layout must match emac_stats_keys).  IRQs off
 * so counters are not updated mid-copy.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
/* ethtool get_drvinfo: driver name/version and per-device bus string */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
/* ethtool operations table; TX csum/SG status is reported by the
 * generic helpers from the NETIF_F_* flags set in emac_probe() */
static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
/*
 * MII ioctl handler (SIOCxMIIxxx plus the legacy SIOCDEVPRIVATE
 * equivalents).  The data words live in ifr_ifru per the old MII ioctl
 * convention: [0]=phy id, [1]=register, [2]=value in, [3]=value out.
 * Not supported in the PHY-less configuration (address < 0).
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1942 static int __init emac_probe(struct ocp_device *ocpdev)
1944 struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1945 struct net_device *ndev;
1946 struct ocp_device *maldev;
1947 struct ocp_enet_private *dev;
1948 int err, i;
1950 DBG("%d: probe" NL, ocpdev->def->index);
1952 if (!emacdata) {
1953 printk(KERN_ERR "emac%d: Missing additional data!\n",
1954 ocpdev->def->index);
1955 return -ENODEV;
1958 /* Allocate our net_device structure */
1959 ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1960 if (!ndev) {
1961 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1962 ocpdev->def->index);
1963 return -ENOMEM;
1965 dev = ndev->priv;
1966 dev->ndev = ndev;
1967 dev->ldev = &ocpdev->dev;
1968 dev->def = ocpdev->def;
1969 SET_MODULE_OWNER(ndev);
1971 /* Find MAL device we are connected to */
1972 maldev =
1973 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1974 if (!maldev) {
1975 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1976 dev->def->index, emacdata->mal_idx);
1977 err = -ENODEV;
1978 goto out;
1980 dev->mal = ocp_get_drvdata(maldev);
1981 if (!dev->mal) {
1982 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1983 dev->def->index, emacdata->mal_idx);
1984 err = -ENODEV;
1985 goto out;
1988 /* Register with MAL */
1989 dev->commac.ops = &emac_commac_ops;
1990 dev->commac.dev = dev;
1991 dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1992 dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1993 err = mal_register_commac(dev->mal, &dev->commac);
1994 if (err) {
1995 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1996 dev->def->index, emacdata->mal_idx);
1997 goto out;
1999 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2000 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2002 /* Get pointers to BD rings */
2003 dev->tx_desc =
2004 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2005 emacdata->mal_tx_chan);
2006 dev->rx_desc =
2007 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2008 emacdata->mal_rx_chan);
2010 DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2011 DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2013 /* Clean rings */
2014 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2015 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2017 /* If we depend on another EMAC for MDIO, check whether it was probed already */
2018 if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2019 struct ocp_device *mdiodev =
2020 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2021 emacdata->mdio_idx);
2022 if (!mdiodev) {
2023 printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2024 dev->def->index, emacdata->mdio_idx);
2025 err = -ENODEV;
2026 goto out2;
2028 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2029 if (!dev->mdio_dev) {
2030 printk(KERN_ERR
2031 "emac%d: emac%d hasn't been initialized yet!\n",
2032 dev->def->index, emacdata->mdio_idx);
2033 err = -ENODEV;
2034 goto out2;
2038 /* Attach to ZMII, if needed */
2039 if ((err = zmii_attach(dev)) != 0)
2040 goto out2;
2042 /* Attach to RGMII, if needed */
2043 if ((err = rgmii_attach(dev)) != 0)
2044 goto out3;
2046 /* Attach to TAH, if needed */
2047 if ((err = tah_attach(dev)) != 0)
2048 goto out4;
2050 /* Map EMAC regs */
2051 dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
2052 if (!dev->emacp) {
2053 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2054 dev->def->index);
2055 err = -ENOMEM;
2056 goto out5;
2059 /* Fill in MAC address */
2060 for (i = 0; i < 6; ++i)
2061 ndev->dev_addr[i] = emacdata->mac_addr[i];
2063 /* Set some link defaults before we can find out real parameters */
2064 dev->phy.speed = SPEED_100;
2065 dev->phy.duplex = DUPLEX_FULL;
2066 dev->phy.autoneg = AUTONEG_DISABLE;
2067 dev->phy.pause = dev->phy.asym_pause = 0;
2068 dev->stop_timeout = STOP_TIMEOUT_100;
2069 init_timer(&dev->link_timer);
2070 dev->link_timer.function = emac_link_timer;
2071 dev->link_timer.data = (unsigned long)dev;
2073 /* Find PHY if any */
2074 dev->phy.dev = ndev;
2075 dev->phy.mode = emacdata->phy_mode;
2076 if (emacdata->phy_map != 0xffffffff) {
2077 u32 phy_map = emacdata->phy_map | busy_phy_map;
2078 u32 adv;
2080 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2081 emacdata->phy_map, busy_phy_map);
2083 EMAC_RX_CLK_TX(dev->def->index);
2085 dev->phy.mdio_read = emac_mdio_read;
2086 dev->phy.mdio_write = emac_mdio_write;
2088 /* Configure EMAC with defaults so we can at least use MDIO
2089 * This is needed mostly for 440GX
2091 if (emac_phy_gpcs(dev->phy.mode)) {
2092 /* XXX
2093 * Make GPCS PHY address equal to EMAC index.
2094 * We probably should take into account busy_phy_map
2095 * and/or phy_map here.
2097 dev->phy.address = dev->def->index;
2100 emac_configure(dev);
2102 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2103 if (!(phy_map & 1)) {
2104 int r;
2105 busy_phy_map |= 1 << i;
2107 /* Quick check if there is a PHY at the address */
2108 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2109 if (r == 0xffff || r < 0)
2110 continue;
2111 if (!mii_phy_probe(&dev->phy, i))
2112 break;
2114 if (i == 0x20) {
2115 printk(KERN_WARNING "emac%d: can't find PHY!\n",
2116 dev->def->index);
2117 goto out6;
2120 /* Init PHY */
2121 if (dev->phy.def->ops->init)
2122 dev->phy.def->ops->init(&dev->phy);
2124 /* Disable any PHY features not supported by the platform */
2125 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2127 /* Setup initial link parameters */
2128 if (dev->phy.features & SUPPORTED_Autoneg) {
2129 adv = dev->phy.features;
2130 #if !defined(CONFIG_40x)
2131 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2132 #endif
2133 /* Restart autonegotiation */
2134 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2135 } else {
2136 u32 f = dev->phy.def->features;
2137 int speed = SPEED_10, fd = DUPLEX_HALF;
2139 /* Select highest supported speed/duplex */
2140 if (f & SUPPORTED_1000baseT_Full) {
2141 speed = SPEED_1000;
2142 fd = DUPLEX_FULL;
2143 } else if (f & SUPPORTED_1000baseT_Half)
2144 speed = SPEED_1000;
2145 else if (f & SUPPORTED_100baseT_Full) {
2146 speed = SPEED_100;
2147 fd = DUPLEX_FULL;
2148 } else if (f & SUPPORTED_100baseT_Half)
2149 speed = SPEED_100;
2150 else if (f & SUPPORTED_10baseT_Full)
2151 fd = DUPLEX_FULL;
2153 /* Force link parameters */
2154 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2156 } else {
2157 emac_reset(dev);
2159 /* PHY-less configuration.
2160 * XXX I probably should move these settings to emacdata
2162 dev->phy.address = -1;
2163 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2164 dev->phy.pause = 1;
2167 /* Fill in the driver function table */
2168 ndev->open = &emac_open;
2169 if (dev->tah_dev) {
2170 ndev->hard_start_xmit = &emac_start_xmit_sg;
2171 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2172 } else
2173 ndev->hard_start_xmit = &emac_start_xmit;
2174 ndev->tx_timeout = &emac_full_tx_reset;
2175 ndev->watchdog_timeo = 5 * HZ;
2176 ndev->stop = &emac_close;
2177 ndev->get_stats = &emac_stats;
2178 ndev->set_multicast_list = &emac_set_multicast_list;
2179 ndev->do_ioctl = &emac_ioctl;
2180 if (emac_phy_supports_gige(emacdata->phy_mode)) {
2181 ndev->change_mtu = &emac_change_mtu;
2182 dev->commac.ops = &emac_commac_sg_ops;
2184 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2186 netif_carrier_off(ndev);
2187 netif_stop_queue(ndev);
2189 err = register_netdev(ndev);
2190 if (err) {
2191 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2192 dev->def->index, err);
2193 goto out6;
2196 ocp_set_drvdata(ocpdev, dev);
2198 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2199 ndev->name, dev->def->index,
2200 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2201 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2203 if (dev->phy.address >= 0)
2204 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2205 dev->phy.def->name, dev->phy.address);
2207 emac_dbg_register(dev->def->index, dev);
2209 return 0;
2210 out6:
2211 iounmap(dev->emacp);
2212 out5:
2213 tah_fini(dev->tah_dev);
2214 out4:
2215 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2216 out3:
2217 zmii_fini(dev->zmii_dev, dev->zmii_input);
2218 out2:
2219 mal_unregister_commac(dev->mal, &dev->commac);
2220 out:
2221 kfree(ndev);
2222 return err;
/* Match any IBM EMAC function on the OCP bus */
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};
/* OCP driver binding for the EMAC devices matched by emac_ids */
static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
/*
 * Module init: bring up the MAL layer first, then register the OCP
 * driver (probing all EMACs) and finally the debug hooks.  The
 * EMAC_CLK_INTERNAL/EXTERNAL bracketing around registration is a
 * platform clock-selection sequence required while the EMACs are being
 * configured (see ibm_emac_core.h for the per-SoC definitions).
 */
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
2259 static void __exit emac_exit(void)
2261 DBG(": exit" NL);
2262 ocp_unregister_driver(&emac_driver);
2263 mal_exit();
2264 emac_fini_debug();
2267 module_init(emac_init);
2268 module_exit(emac_exit);