/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);
/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
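/* Illustrative arithmetic for the values above (assumes 1518-byte maximum
 * frames and 9018-byte jumbo frames): one frame plus preamble/SFD (8) and
 * inter-frame gap (12) is 1538 byte times = 12304 bit times, i.e. ~1230us
 * at 10Mb/s, ~123us at 100Mb/s and ~12.3us at 1000Mb/s; a jumbo frame at
 * 1000Mb/s works out to ~72.3us, matching STOP_TIMEOUT_1000_JUMBO.
 */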
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
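/* Illustrative note on the hash above: the top six bits of the CRC select a
 * bit index 0..63; gaht[bit >> 4] picks one of the four 16-bit GAHT
 * registers and (0x8000 >> (bit & 0x0f)) the bit within it, so each
 * multicast address sets exactly one of the 64 hash-filter bits.
 */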
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
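/* Illustrative note: the TRTR threshold is encoded in 64-byte units, minus
 * one. emac_configure() below passes tx_size / 2, so e.g. a 2048-byte TX
 * FIFO yields ((1024 >> 6) - 1) = 15 before shifting into position.
 */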
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ((high & 0x3ff) << 6);
	else
		return (low << 23) | ((high & 0x1ff) << 7);
}
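/* Worked example (illustrative, assuming a 16-byte fifo_entry_size): with
 * rx_size = 4096, emac_configure() below requests a low-water mark of
 * 4096 / 8 / 16 = 32 FIFO entries and a high-water mark of
 * 4096 / 4 / 16 = 64 entries, packed into RWMR at the shifts above.
 */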
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS |
				EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupts */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						      EMAC4_ISR_RXOE | */;
	out_be32(&p->iser, r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		emac_mii_reset_phy(&dev->phy);

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
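/* Summary of the STACR handshake above: wait for any previous command to
 * finish (OC set, possibly inverted), write the opcode, PHY id and register
 * into STACR, poll OC again, then check PHYE for a PHY-side error and
 * extract the 16-bit result from the PHYD field.
 */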
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
/* Tx lock BHs */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                            --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BHs */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
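/* Illustrative note on the "+ 2" above: reserving two extra bytes in front
 * of the 14-byte Ethernet header keeps the IP header word-aligned (the
 * usual NET_IP_ALIGN trick), while mapping from skb->data - 2 keeps the
 * DMA-synced region starting on the original buffer alignment.
 */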
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * and always use EMAC4
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
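/* Illustrative example (assuming MAL_MAX_TX_SIZE is 4096): a 5000-byte
 * region entering this helper is split into two BDs of 4096 and 904 bytes;
 * only the BD carrying the final chunk of the final fragment is marked
 * MAL_TX_CTRL_LAST.
 */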
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
/* Tx lock BHs */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
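/* Note: multi-BD packets are linearized here rather than chained as skb
 * fragments; each continuation BD's payload is memcpy'd onto the tail of
 * rx_sg_skb, which was allocated rx_skb_size bytes and therefore bounds
 * tot_len in the check above.
 */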
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];

		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {

		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);

		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
struct emac_depentry {
	u32 phandle;
	struct device_node *node;
	struct of_device *ofdev;
	void *drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return (there == EMAC_DEP_COUNT);
}
static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}

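/* Helper: fetch a u32 property into *val, complaining (if 'fatal')
 * when it is absent or too short. Typical use, as in
 * emac_init_config() below:
 *
 *	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
 *		return -ENXIO;
 */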
static int __devinit emac_read_uint_prop(struct device_node *np,
					 const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}

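/* PHY discovery: unless the device-tree pins the PHY down via
 * "phy-address" or restricts the search via "phy-map", we scan all
 * 32 MII addresses not already claimed in busy_phy_map and probe the
 * first one that answers an MII_BMCR read.
 */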
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}

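/* Pull the per-EMAC configuration out of the device-tree. For
 * illustration only, a node consumed by this code might look roughly
 * like the sketch below (made-up addresses and values, not a binding
 * reference):
 *
 *	EMAC0: ethernet@ef600800 {
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		phy-mode = "rgmii";
 *	};
 */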
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	const void *p;
	int plen;
	const char *pm, *phy_modes[] = {
		[PHY_MODE_NA] = "",
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",
	};

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
	if (pm != NULL) {
		int i;
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {
				dev->phy_mode = i;
				break;
			}
	}

	/* Backward compat with non-final DT */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;
	}

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}

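/* Probe path: allocate the net_device, parse the device-tree config,
 * map IRQs and registers, wait for the MAL/ZMII/RGMII/TAH/MDIO
 * dependencies, attach to each of them in turn and only then register
 * with the network stack. The error labels at the bottom unwind those
 * steps in reverse order.
 */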
static int __devinit emac_probe(struct of_device *ofdev,
				const struct of_device_id *match)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS */
	if (of_get_property(np, "unused", NULL))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate ethernet device!\n",
		       np->full_name);
		goto err_gone;
	}
	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/*  display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	ndev->tx_timeout = &emac_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO
	       "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->cell_index, np->full_name,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	kfree(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}

static int __devexit emac_remove(struct of_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	flush_scheduled_work();

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	kfree(dev->ndev);

	return 0;
}

/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},
};

static struct of_platform_driver emac_driver = {
	.name = "emac",
	.match_table = emac_match,

	.probe = emac_probe,
	.remove = emac_remove,
};

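/* Build a list of candidate EMAC nodes sorted by cell-index so that
 * interface ordering (and the EMAC_DEP_PREV_IDX dependency above)
 * follows the hardware numbering rather than device-tree discovery
 * order.
 */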
static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}

static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = of_register_platform_driver(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	emac_fini_debug();
	return rc;
}

static void __exit emac_exit(void)
{
	int i;

	of_unregister_platform_driver(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);