2 * drivers/net/ethernet/ibm/emac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_net.h>
43 #include <linux/slab.h>
45 #include <asm/processor.h>
48 #include <asm/uaccess.h>
50 #include <asm/dcr-regs.h>
55 * Lack of dma_unmap_???? calls is intentional.
57 * API-correct usage requires additional support state information to be
58 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
59 * EMAC design (e.g. TX buffer passed from network stack can be split into
60 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
61 * maintaining such information will add additional overhead.
62 * Current DMA API implementation for 4xx processors only ensures cache coherency
63 * and dma_unmap_???? routines are empty and are likely to stay this way.
64 * I decided to omit dma_unmap_??? calls because I don't want to add additional
65 * complexity just for the sake of following some abstract API, when it doesn't
66 * add any real benefit to the driver. I understand that this decision maybe
67 * controversial, but I really tried to make code API-correct and efficient
68 * at the same time and didn't come up with code I liked :(. --ebs
71 #define DRV_NAME "emac"
72 #define DRV_VERSION "3.54"
73 #define DRV_DESC "PPC 4xx OCP EMAC driver"
75 MODULE_DESCRIPTION(DRV_DESC
);
77 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
78 MODULE_LICENSE("GPL");
81 * PPC64 doesn't (yet) have a cacheable_memcpy
84 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
87 /* minimum number of free TX descriptors required to wake up TX process */
88 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
90 /* If packet size is less than this number, we allocate small skb and copy packet
91 * contents into it instead of just sending original big skb up
93 #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
95 /* Since multiple EMACs share MDIO lines in various ways, we need
96 * to avoid re-using the same PHY ID in cases where the arch didn't
97 * setup precise phy_map entries
99 * XXX This is something that needs to be reworked as we can have multiple
100 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
101 * probably require in that case to have explicit PHY IDs in the device-tree
103 static u32 busy_phy_map
;
104 static DEFINE_MUTEX(emac_phy_map_lock
);
106 /* This is the wait queue used to wait on any event related to probe, that
107 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
109 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait
);
111 /* Having stable interface names is a doomed idea. However, it would be nice
112 * if we didn't have completely random interface names at boot too :-) It's
113 * just a matter of making everybody's life easier. Since we are doing
114 * threaded probing, it's a bit harder though. The base idea here is that
115 * we make up a list of all emacs in the device-tree before we register the
116 * driver. Every emac will then wait for the previous one in the list to
117 * initialize before itself. We should also keep that list ordered by
119 * That list is only 4 entries long, meaning that additional EMACs don't
120 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
123 #define EMAC_BOOT_LIST_SIZE 4
124 static struct device_node
*emac_boot_list
[EMAC_BOOT_LIST_SIZE
];
126 /* How long should I wait for dependent devices ? */
127 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
129 /* I don't want to litter system log with timeout errors
130 * when we have brain-damaged PHY.
132 static inline void emac_report_timeout_error(struct emac_instance
*dev
,
135 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
|
136 EMAC_FTR_460EX_PHY_CLK_FIX
|
137 EMAC_FTR_440EP_PHY_CLK_FIX
))
138 DBG(dev
, "%s" NL
, error
);
139 else if (net_ratelimit())
140 printk(KERN_ERR
"%s: %s\n", dev
->ofdev
->dev
.of_node
->full_name
,
144 /* EMAC PHY clock workaround:
145 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
146 * which allows controlling each EMAC clock
/* 440EP PHY clock workaround: switch the EMAC RX clock for this cell to the
 * TX clock source via SDR0_MFR (native DCR access only).
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
/* Undo emac_rx_clk_tx(): restore the default RX clock source for this cell. */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
166 /* PHY polling intervals */
167 #define PHY_POLL_LINK_ON HZ
168 #define PHY_POLL_LINK_OFF (HZ / 5)
170 /* Graceful stop timeouts in us.
171 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
173 #define STOP_TIMEOUT_10 1230
174 #define STOP_TIMEOUT_100 124
175 #define STOP_TIMEOUT_1000 13
176 #define STOP_TIMEOUT_1000_JUMBO 73
178 static unsigned char default_mcast_addr
[] = {
179 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
182 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
183 static const char emac_stats_keys
[EMAC_ETHTOOL_STATS_COUNT
][ETH_GSTRING_LEN
] = {
184 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
185 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
186 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
187 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
188 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
189 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
190 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
191 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
192 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
193 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
194 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
195 "tx_bd_excessive_collisions", "tx_bd_late_collision",
196 "tx_bd_multple_collisions", "tx_bd_single_collision",
197 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
201 static irqreturn_t
emac_irq(int irq
, void *dev_instance
);
202 static void emac_clean_tx_ring(struct emac_instance
*dev
);
203 static void __emac_set_multicast_list(struct emac_instance
*dev
);
205 static inline int emac_phy_supports_gige(int phy_mode
)
207 return phy_mode
== PHY_MODE_GMII
||
208 phy_mode
== PHY_MODE_RGMII
||
209 phy_mode
== PHY_MODE_SGMII
||
210 phy_mode
== PHY_MODE_TBI
||
211 phy_mode
== PHY_MODE_RTBI
;
214 static inline int emac_phy_gpcs(int phy_mode
)
216 return phy_mode
== PHY_MODE_SGMII
||
217 phy_mode
== PHY_MODE_TBI
||
218 phy_mode
== PHY_MODE_RTBI
;
221 static inline void emac_tx_enable(struct emac_instance
*dev
)
223 struct emac_regs __iomem
*p
= dev
->emacp
;
226 DBG(dev
, "tx_enable" NL
);
228 r
= in_be32(&p
->mr0
);
229 if (!(r
& EMAC_MR0_TXE
))
230 out_be32(&p
->mr0
, r
| EMAC_MR0_TXE
);
233 static void emac_tx_disable(struct emac_instance
*dev
)
235 struct emac_regs __iomem
*p
= dev
->emacp
;
238 DBG(dev
, "tx_disable" NL
);
240 r
= in_be32(&p
->mr0
);
241 if (r
& EMAC_MR0_TXE
) {
242 int n
= dev
->stop_timeout
;
243 out_be32(&p
->mr0
, r
& ~EMAC_MR0_TXE
);
244 while (!(in_be32(&p
->mr0
) & EMAC_MR0_TXI
) && n
) {
249 emac_report_timeout_error(dev
, "TX disable timeout");
253 static void emac_rx_enable(struct emac_instance
*dev
)
255 struct emac_regs __iomem
*p
= dev
->emacp
;
258 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
)))
261 DBG(dev
, "rx_enable" NL
);
263 r
= in_be32(&p
->mr0
);
264 if (!(r
& EMAC_MR0_RXE
)) {
265 if (unlikely(!(r
& EMAC_MR0_RXI
))) {
266 /* Wait if previous async disable is still in progress */
267 int n
= dev
->stop_timeout
;
268 while (!(r
= in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
273 emac_report_timeout_error(dev
,
274 "RX disable timeout");
276 out_be32(&p
->mr0
, r
| EMAC_MR0_RXE
);
282 static void emac_rx_disable(struct emac_instance
*dev
)
284 struct emac_regs __iomem
*p
= dev
->emacp
;
287 DBG(dev
, "rx_disable" NL
);
289 r
= in_be32(&p
->mr0
);
290 if (r
& EMAC_MR0_RXE
) {
291 int n
= dev
->stop_timeout
;
292 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
293 while (!(in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
298 emac_report_timeout_error(dev
, "RX disable timeout");
302 static inline void emac_netif_stop(struct emac_instance
*dev
)
304 netif_tx_lock_bh(dev
->ndev
);
305 netif_addr_lock(dev
->ndev
);
307 netif_addr_unlock(dev
->ndev
);
308 netif_tx_unlock_bh(dev
->ndev
);
309 dev
->ndev
->trans_start
= jiffies
; /* prevent tx timeout */
310 mal_poll_disable(dev
->mal
, &dev
->commac
);
311 netif_tx_disable(dev
->ndev
);
314 static inline void emac_netif_start(struct emac_instance
*dev
)
316 netif_tx_lock_bh(dev
->ndev
);
317 netif_addr_lock(dev
->ndev
);
319 if (dev
->mcast_pending
&& netif_running(dev
->ndev
))
320 __emac_set_multicast_list(dev
);
321 netif_addr_unlock(dev
->ndev
);
322 netif_tx_unlock_bh(dev
->ndev
);
324 netif_wake_queue(dev
->ndev
);
326 /* NOTE: unconditional netif_wake_queue is only appropriate
327 * so long as all callers are assured to have free tx slots
328 * (taken from tg3... though the case where that is wrong is
329 * not terribly harmful)
331 mal_poll_enable(dev
->mal
, &dev
->commac
);
334 static inline void emac_rx_disable_async(struct emac_instance
*dev
)
336 struct emac_regs __iomem
*p
= dev
->emacp
;
339 DBG(dev
, "rx_disable_async" NL
);
341 r
= in_be32(&p
->mr0
);
342 if (r
& EMAC_MR0_RXE
)
343 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
346 static int emac_reset(struct emac_instance
*dev
)
348 struct emac_regs __iomem
*p
= dev
->emacp
;
351 DBG(dev
, "reset" NL
);
353 if (!dev
->reset_failed
) {
354 /* 40x erratum suggests stopping RX channel before reset,
357 emac_rx_disable(dev
);
358 emac_tx_disable(dev
);
361 #ifdef CONFIG_PPC_DCR_NATIVE
362 /* Enable internal clock source */
363 if (emac_has_feature(dev
, EMAC_FTR_460EX_PHY_CLK_FIX
))
364 dcri_clrset(SDR0
, SDR0_ETH_CFG
,
365 0, SDR0_ETH_CFG_ECS
<< dev
->cell_index
);
368 out_be32(&p
->mr0
, EMAC_MR0_SRST
);
369 while ((in_be32(&p
->mr0
) & EMAC_MR0_SRST
) && n
)
372 #ifdef CONFIG_PPC_DCR_NATIVE
373 /* Enable external clock source */
374 if (emac_has_feature(dev
, EMAC_FTR_460EX_PHY_CLK_FIX
))
375 dcri_clrset(SDR0
, SDR0_ETH_CFG
,
376 SDR0_ETH_CFG_ECS
<< dev
->cell_index
, 0);
380 dev
->reset_failed
= 0;
383 emac_report_timeout_error(dev
, "reset timeout");
384 dev
->reset_failed
= 1;
389 static void emac_hash_mc(struct emac_instance
*dev
)
391 const int regs
= EMAC_XAHT_REGS(dev
);
392 u32
*gaht_base
= emac_gaht_base(dev
);
394 struct netdev_hw_addr
*ha
;
397 DBG(dev
, "hash_mc %d" NL
, netdev_mc_count(dev
->ndev
));
399 memset(gaht_temp
, 0, sizeof (gaht_temp
));
401 netdev_for_each_mc_addr(ha
, dev
->ndev
) {
403 DBG2(dev
, "mc %pM" NL
, ha
->addr
);
405 slot
= EMAC_XAHT_CRC_TO_SLOT(dev
,
406 ether_crc(ETH_ALEN
, ha
->addr
));
407 reg
= EMAC_XAHT_SLOT_TO_REG(dev
, slot
);
408 mask
= EMAC_XAHT_SLOT_TO_MASK(dev
, slot
);
410 gaht_temp
[reg
] |= mask
;
413 for (i
= 0; i
< regs
; i
++)
414 out_be32(gaht_base
+ i
, gaht_temp
[i
]);
417 static inline u32
emac_iff2rmr(struct net_device
*ndev
)
419 struct emac_instance
*dev
= netdev_priv(ndev
);
422 r
= EMAC_RMR_SP
| EMAC_RMR_SFCS
| EMAC_RMR_IAE
| EMAC_RMR_BAE
;
424 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
429 if (ndev
->flags
& IFF_PROMISC
)
431 else if (ndev
->flags
& IFF_ALLMULTI
||
432 (netdev_mc_count(ndev
) > EMAC_XAHT_SLOTS(dev
)))
434 else if (!netdev_mc_empty(ndev
))
437 if (emac_has_feature(dev
, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE
)) {
438 r
&= ~EMAC4_RMR_MJS_MASK
;
439 r
|= EMAC4_RMR_MJS(ndev
->mtu
);
445 static u32
__emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
447 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC_MR1_TR0_MULT
;
449 DBG2(dev
, "__emac_calc_base_mr1" NL
);
453 ret
|= EMAC_MR1_TFS_2K
;
456 printk(KERN_WARNING
"%s: Unknown Tx FIFO size %d\n",
457 dev
->ndev
->name
, tx_size
);
462 ret
|= EMAC_MR1_RFS_16K
;
465 ret
|= EMAC_MR1_RFS_4K
;
468 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
469 dev
->ndev
->name
, rx_size
);
475 static u32
__emac4_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
477 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC4_MR1_TR
|
478 EMAC4_MR1_OBCI(dev
->opb_bus_freq
/ 1000000);
480 DBG2(dev
, "__emac4_calc_base_mr1" NL
);
484 ret
|= EMAC4_MR1_TFS_16K
;
487 ret
|= EMAC4_MR1_TFS_4K
;
490 ret
|= EMAC4_MR1_TFS_2K
;
493 printk(KERN_WARNING
"%s: Unknown Tx FIFO size %d\n",
494 dev
->ndev
->name
, tx_size
);
499 ret
|= EMAC4_MR1_RFS_16K
;
502 ret
|= EMAC4_MR1_RFS_4K
;
505 ret
|= EMAC4_MR1_RFS_2K
;
508 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
509 dev
->ndev
->name
, rx_size
);
515 static u32
emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
517 return emac_has_feature(dev
, EMAC_FTR_EMAC4
) ?
518 __emac4_calc_base_mr1(dev
, tx_size
, rx_size
) :
519 __emac_calc_base_mr1(dev
, tx_size
, rx_size
);
522 static inline u32
emac_calc_trtr(struct emac_instance
*dev
, unsigned int size
)
524 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
525 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4
;
527 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT
;
530 static inline u32
emac_calc_rwmr(struct emac_instance
*dev
,
531 unsigned int low
, unsigned int high
)
533 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
534 return (low
<< 22) | ( (high
& 0x3ff) << 6);
536 return (low
<< 23) | ( (high
& 0x1ff) << 7);
539 static int emac_configure(struct emac_instance
*dev
)
541 struct emac_regs __iomem
*p
= dev
->emacp
;
542 struct net_device
*ndev
= dev
->ndev
;
543 int tx_size
, rx_size
, link
= netif_carrier_ok(dev
->ndev
);
546 DBG(dev
, "configure" NL
);
549 out_be32(&p
->mr1
, in_be32(&p
->mr1
)
550 | EMAC_MR1_FDE
| EMAC_MR1_ILE
);
552 } else if (emac_reset(dev
) < 0)
555 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
556 tah_reset(dev
->tah_dev
);
558 DBG(dev
, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
559 link
, dev
->phy
.duplex
, dev
->phy
.pause
, dev
->phy
.asym_pause
);
561 /* Default fifo sizes */
562 tx_size
= dev
->tx_fifo_size
;
563 rx_size
= dev
->rx_fifo_size
;
565 /* No link, force loopback */
567 mr1
= EMAC_MR1_FDE
| EMAC_MR1_ILE
;
569 /* Check for full duplex */
570 else if (dev
->phy
.duplex
== DUPLEX_FULL
)
571 mr1
|= EMAC_MR1_FDE
| EMAC_MR1_MWSW_001
;
573 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
574 dev
->stop_timeout
= STOP_TIMEOUT_10
;
575 switch (dev
->phy
.speed
) {
577 if (emac_phy_gpcs(dev
->phy
.mode
)) {
578 mr1
|= EMAC_MR1_MF_1000GPCS
| EMAC_MR1_MF_IPPA(
579 (dev
->phy
.gpcs_address
!= 0xffffffff) ?
580 dev
->phy
.gpcs_address
: dev
->phy
.address
);
582 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
583 * identify this GPCS PHY later.
585 out_be32(&p
->u1
.emac4
.ipcr
, 0xdeadbeef);
587 mr1
|= EMAC_MR1_MF_1000
;
589 /* Extended fifo sizes */
590 tx_size
= dev
->tx_fifo_size_gige
;
591 rx_size
= dev
->rx_fifo_size_gige
;
593 if (dev
->ndev
->mtu
> ETH_DATA_LEN
) {
594 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
595 mr1
|= EMAC4_MR1_JPSM
;
597 mr1
|= EMAC_MR1_JPSM
;
598 dev
->stop_timeout
= STOP_TIMEOUT_1000_JUMBO
;
600 dev
->stop_timeout
= STOP_TIMEOUT_1000
;
603 mr1
|= EMAC_MR1_MF_100
;
604 dev
->stop_timeout
= STOP_TIMEOUT_100
;
606 default: /* make gcc happy */
610 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
611 rgmii_set_speed(dev
->rgmii_dev
, dev
->rgmii_port
,
613 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
614 zmii_set_speed(dev
->zmii_dev
, dev
->zmii_port
, dev
->phy
.speed
);
616 /* on 40x erratum forces us to NOT use integrated flow control,
617 * let's hope it works on 44x ;)
619 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
) &&
620 dev
->phy
.duplex
== DUPLEX_FULL
) {
622 mr1
|= EMAC_MR1_EIFC
| EMAC_MR1_APP
;
623 else if (dev
->phy
.asym_pause
)
627 /* Add base settings & fifo sizes & program MR1 */
628 mr1
|= emac_calc_base_mr1(dev
, tx_size
, rx_size
);
629 out_be32(&p
->mr1
, mr1
);
631 /* Set individual MAC address */
632 out_be32(&p
->iahr
, (ndev
->dev_addr
[0] << 8) | ndev
->dev_addr
[1]);
633 out_be32(&p
->ialr
, (ndev
->dev_addr
[2] << 24) |
634 (ndev
->dev_addr
[3] << 16) | (ndev
->dev_addr
[4] << 8) |
637 /* VLAN Tag Protocol ID */
638 out_be32(&p
->vtpid
, 0x8100);
640 /* Receive mode register */
641 r
= emac_iff2rmr(ndev
);
642 if (r
& EMAC_RMR_MAE
)
644 out_be32(&p
->rmr
, r
);
646 /* FIFOs thresholds */
647 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
648 r
= EMAC4_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
649 tx_size
/ 2 / dev
->fifo_entry_size
);
651 r
= EMAC_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
652 tx_size
/ 2 / dev
->fifo_entry_size
);
653 out_be32(&p
->tmr1
, r
);
654 out_be32(&p
->trtr
, emac_calc_trtr(dev
, tx_size
/ 2));
656 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
657 there should be still enough space in FIFO to allow the our link
658 partner time to process this frame and also time to send PAUSE
661 Here is the worst case scenario for the RX FIFO "headroom"
662 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
664 1) One maximum-length frame on TX 1522 bytes
665 2) One PAUSE frame time 64 bytes
666 3) PAUSE frame decode time allowance 64 bytes
667 4) One maximum-length frame on RX 1522 bytes
668 5) Round-trip propagation delay of the link (100Mb) 15 bytes
672 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
673 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
675 r
= emac_calc_rwmr(dev
, rx_size
/ 8 / dev
->fifo_entry_size
,
676 rx_size
/ 4 / dev
->fifo_entry_size
);
677 out_be32(&p
->rwmr
, r
);
679 /* Set PAUSE timer to the maximum */
680 out_be32(&p
->ptr
, 0xffff);
683 r
= EMAC_ISR_OVR
| EMAC_ISR_BP
| EMAC_ISR_SE
|
684 EMAC_ISR_ALE
| EMAC_ISR_BFCS
| EMAC_ISR_PTLE
| EMAC_ISR_ORE
|
685 EMAC_ISR_IRE
| EMAC_ISR_TE
;
686 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
687 r
|= EMAC4_ISR_TXPE
| EMAC4_ISR_RXPE
/* | EMAC4_ISR_TXUE |
689 out_be32(&p
->iser
, r
);
691 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
692 if (emac_phy_gpcs(dev
->phy
.mode
)) {
693 if (dev
->phy
.gpcs_address
!= 0xffffffff)
694 emac_mii_reset_gpcs(&dev
->phy
);
696 emac_mii_reset_phy(&dev
->phy
);
702 static void emac_reinitialize(struct emac_instance
*dev
)
704 DBG(dev
, "reinitialize" NL
);
706 emac_netif_stop(dev
);
707 if (!emac_configure(dev
)) {
711 emac_netif_start(dev
);
714 static void emac_full_tx_reset(struct emac_instance
*dev
)
716 DBG(dev
, "full_tx_reset" NL
);
718 emac_tx_disable(dev
);
719 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
720 emac_clean_tx_ring(dev
);
721 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= 0;
725 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
730 static void emac_reset_work(struct work_struct
*work
)
732 struct emac_instance
*dev
= container_of(work
, struct emac_instance
, reset_work
);
734 DBG(dev
, "reset_work" NL
);
736 mutex_lock(&dev
->link_lock
);
738 emac_netif_stop(dev
);
739 emac_full_tx_reset(dev
);
740 emac_netif_start(dev
);
742 mutex_unlock(&dev
->link_lock
);
745 static void emac_tx_timeout(struct net_device
*ndev
)
747 struct emac_instance
*dev
= netdev_priv(ndev
);
749 DBG(dev
, "tx_timeout" NL
);
751 schedule_work(&dev
->reset_work
);
755 static inline int emac_phy_done(struct emac_instance
*dev
, u32 stacr
)
757 int done
= !!(stacr
& EMAC_STACR_OC
);
759 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
765 static int __emac_mdio_read(struct emac_instance
*dev
, u8 id
, u8 reg
)
767 struct emac_regs __iomem
*p
= dev
->emacp
;
769 int n
, err
= -ETIMEDOUT
;
771 mutex_lock(&dev
->mdio_lock
);
773 DBG2(dev
, "mdio_read(%02x,%02x)" NL
, id
, reg
);
775 /* Enable proper MDIO port */
776 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
777 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
778 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
779 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
781 /* Wait for management interface to become idle */
783 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
786 DBG2(dev
, " -> timeout wait idle\n");
791 /* Issue read command */
792 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
793 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
795 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
796 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
798 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
799 r
|= EMACX_STACR_STAC_READ
;
801 r
|= EMAC_STACR_STAC_READ
;
802 r
|= (reg
& EMAC_STACR_PRA_MASK
)
803 | ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
);
804 out_be32(&p
->stacr
, r
);
806 /* Wait for read to complete */
808 while (!emac_phy_done(dev
, (r
= in_be32(&p
->stacr
)))) {
811 DBG2(dev
, " -> timeout wait complete\n");
816 if (unlikely(r
& EMAC_STACR_PHYE
)) {
817 DBG(dev
, "mdio_read(%02x, %02x) failed" NL
, id
, reg
);
822 r
= ((r
>> EMAC_STACR_PHYD_SHIFT
) & EMAC_STACR_PHYD_MASK
);
824 DBG2(dev
, "mdio_read -> %04x" NL
, r
);
827 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
828 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
829 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
830 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
831 mutex_unlock(&dev
->mdio_lock
);
833 return err
== 0 ? r
: err
;
836 static void __emac_mdio_write(struct emac_instance
*dev
, u8 id
, u8 reg
,
839 struct emac_regs __iomem
*p
= dev
->emacp
;
841 int n
, err
= -ETIMEDOUT
;
843 mutex_lock(&dev
->mdio_lock
);
845 DBG2(dev
, "mdio_write(%02x,%02x,%04x)" NL
, id
, reg
, val
);
847 /* Enable proper MDIO port */
848 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
849 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
850 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
851 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
853 /* Wait for management interface to be idle */
855 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
858 DBG2(dev
, " -> timeout wait idle\n");
863 /* Issue write command */
864 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
865 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
867 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
868 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
870 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
871 r
|= EMACX_STACR_STAC_WRITE
;
873 r
|= EMAC_STACR_STAC_WRITE
;
874 r
|= (reg
& EMAC_STACR_PRA_MASK
) |
875 ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
) |
876 (val
<< EMAC_STACR_PHYD_SHIFT
);
877 out_be32(&p
->stacr
, r
);
879 /* Wait for write to complete */
881 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
884 DBG2(dev
, " -> timeout wait complete\n");
890 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
891 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
892 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
893 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
894 mutex_unlock(&dev
->mdio_lock
);
897 static int emac_mdio_read(struct net_device
*ndev
, int id
, int reg
)
899 struct emac_instance
*dev
= netdev_priv(ndev
);
902 res
= __emac_mdio_read((dev
->mdio_instance
&&
903 dev
->phy
.gpcs_address
!= id
) ?
904 dev
->mdio_instance
: dev
,
909 static void emac_mdio_write(struct net_device
*ndev
, int id
, int reg
, int val
)
911 struct emac_instance
*dev
= netdev_priv(ndev
);
913 __emac_mdio_write((dev
->mdio_instance
&&
914 dev
->phy
.gpcs_address
!= id
) ?
915 dev
->mdio_instance
: dev
,
916 (u8
) id
, (u8
) reg
, (u16
) val
);
920 static void __emac_set_multicast_list(struct emac_instance
*dev
)
922 struct emac_regs __iomem
*p
= dev
->emacp
;
923 u32 rmr
= emac_iff2rmr(dev
->ndev
);
925 DBG(dev
, "__multicast %08x" NL
, rmr
);
927 /* I decided to relax register access rules here to avoid
930 * There is a real problem with EMAC4 core if we use MWSW_001 bit
931 * in MR1 register and do a full EMAC reset.
932 * One TX BD status update is delayed and, after EMAC reset, it
933 * never happens, resulting in TX hung (it'll be recovered by TX
934 * timeout handler eventually, but this is just gross).
935 * So we either have to do full TX reset or try to cheat here :)
937 * The only required change is to RX mode register, so I *think* all
938 * we need is just to stop RX channel. This seems to work on all
941 * If we need the full reset, we might just trigger the workqueue
942 * and do it async... a bit nasty but should work --BenH
944 dev
->mcast_pending
= 0;
945 emac_rx_disable(dev
);
946 if (rmr
& EMAC_RMR_MAE
)
948 out_be32(&p
->rmr
, rmr
);
953 static void emac_set_multicast_list(struct net_device
*ndev
)
955 struct emac_instance
*dev
= netdev_priv(ndev
);
957 DBG(dev
, "multicast" NL
);
959 BUG_ON(!netif_running(dev
->ndev
));
962 dev
->mcast_pending
= 1;
965 __emac_set_multicast_list(dev
);
968 static int emac_resize_rx_ring(struct emac_instance
*dev
, int new_mtu
)
970 int rx_sync_size
= emac_rx_sync_size(new_mtu
);
971 int rx_skb_size
= emac_rx_skb_size(new_mtu
);
973 int mr1_jumbo_bit_change
= 0;
975 mutex_lock(&dev
->link_lock
);
976 emac_netif_stop(dev
);
977 emac_rx_disable(dev
);
978 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
980 if (dev
->rx_sg_skb
) {
981 ++dev
->estats
.rx_dropped_resize
;
982 dev_kfree_skb(dev
->rx_sg_skb
);
983 dev
->rx_sg_skb
= NULL
;
986 /* Make a first pass over RX ring and mark BDs ready, dropping
987 * non-processed packets on the way. We need this as a separate pass
988 * to simplify error recovery in the case of allocation failure later.
990 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
991 if (dev
->rx_desc
[i
].ctrl
& MAL_RX_CTRL_FIRST
)
992 ++dev
->estats
.rx_dropped_resize
;
994 dev
->rx_desc
[i
].data_len
= 0;
995 dev
->rx_desc
[i
].ctrl
= MAL_RX_CTRL_EMPTY
|
996 (i
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
999 /* Reallocate RX ring only if bigger skb buffers are required */
1000 if (rx_skb_size
<= dev
->rx_skb_size
)
1003 /* Second pass, allocate new skbs */
1004 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
1005 struct sk_buff
*skb
= alloc_skb(rx_skb_size
, GFP_ATOMIC
);
1011 BUG_ON(!dev
->rx_skb
[i
]);
1012 dev_kfree_skb(dev
->rx_skb
[i
]);
1014 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1015 dev
->rx_desc
[i
].data_ptr
=
1016 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, rx_sync_size
,
1017 DMA_FROM_DEVICE
) + 2;
1018 dev
->rx_skb
[i
] = skb
;
1021 /* Check if we need to change "Jumbo" bit in MR1 */
1022 if (emac_has_feature(dev
, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE
)) {
1023 mr1_jumbo_bit_change
= (new_mtu
> ETH_DATA_LEN
) ||
1024 (dev
->ndev
->mtu
> ETH_DATA_LEN
);
1026 mr1_jumbo_bit_change
= (new_mtu
> ETH_DATA_LEN
) ^
1027 (dev
->ndev
->mtu
> ETH_DATA_LEN
);
1030 if (mr1_jumbo_bit_change
) {
1031 /* This is to prevent starting RX channel in emac_rx_enable() */
1032 set_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1034 dev
->ndev
->mtu
= new_mtu
;
1035 emac_full_tx_reset(dev
);
1038 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(new_mtu
));
1041 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1043 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1044 emac_rx_enable(dev
);
1045 emac_netif_start(dev
);
1046 mutex_unlock(&dev
->link_lock
);
1051 /* Process ctx, rtnl_lock semaphore */
1052 static int emac_change_mtu(struct net_device
*ndev
, int new_mtu
)
1054 struct emac_instance
*dev
= netdev_priv(ndev
);
1057 if (new_mtu
< EMAC_MIN_MTU
|| new_mtu
> dev
->max_mtu
)
1060 DBG(dev
, "change_mtu(%d)" NL
, new_mtu
);
1062 if (netif_running(ndev
)) {
1063 /* Check if we really need to reinitialize RX ring */
1064 if (emac_rx_skb_size(ndev
->mtu
) != emac_rx_skb_size(new_mtu
))
1065 ret
= emac_resize_rx_ring(dev
, new_mtu
);
1069 ndev
->mtu
= new_mtu
;
1070 dev
->rx_skb_size
= emac_rx_skb_size(new_mtu
);
1071 dev
->rx_sync_size
= emac_rx_sync_size(new_mtu
);
1077 static void emac_clean_tx_ring(struct emac_instance
*dev
)
1081 for (i
= 0; i
< NUM_TX_BUFF
; ++i
) {
1082 if (dev
->tx_skb
[i
]) {
1083 dev_kfree_skb(dev
->tx_skb
[i
]);
1084 dev
->tx_skb
[i
] = NULL
;
1085 if (dev
->tx_desc
[i
].ctrl
& MAL_TX_CTRL_READY
)
1086 ++dev
->estats
.tx_dropped
;
1088 dev
->tx_desc
[i
].ctrl
= 0;
1089 dev
->tx_desc
[i
].data_ptr
= 0;
1093 static void emac_clean_rx_ring(struct emac_instance
*dev
)
1097 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1098 if (dev
->rx_skb
[i
]) {
1099 dev
->rx_desc
[i
].ctrl
= 0;
1100 dev_kfree_skb(dev
->rx_skb
[i
]);
1101 dev
->rx_skb
[i
] = NULL
;
1102 dev
->rx_desc
[i
].data_ptr
= 0;
1105 if (dev
->rx_sg_skb
) {
1106 dev_kfree_skb(dev
->rx_sg_skb
);
1107 dev
->rx_sg_skb
= NULL
;
1111 static inline int emac_alloc_rx_skb(struct emac_instance
*dev
, int slot
,
1114 struct sk_buff
*skb
= alloc_skb(dev
->rx_skb_size
, flags
);
1118 dev
->rx_skb
[slot
] = skb
;
1119 dev
->rx_desc
[slot
].data_len
= 0;
1121 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1122 dev
->rx_desc
[slot
].data_ptr
=
1123 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, dev
->rx_sync_size
,
1124 DMA_FROM_DEVICE
) + 2;
1126 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1127 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1132 static void emac_print_link_status(struct emac_instance
*dev
)
1134 if (netif_carrier_ok(dev
->ndev
))
1135 printk(KERN_INFO
"%s: link is up, %d %s%s\n",
1136 dev
->ndev
->name
, dev
->phy
.speed
,
1137 dev
->phy
.duplex
== DUPLEX_FULL
? "FDX" : "HDX",
1138 dev
->phy
.pause
? ", pause enabled" :
1139 dev
->phy
.asym_pause
? ", asymmetric pause enabled" : "");
1141 printk(KERN_INFO
"%s: link is down\n", dev
->ndev
->name
);
1144 /* Process ctx, rtnl_lock semaphore */
1145 static int emac_open(struct net_device
*ndev
)
1147 struct emac_instance
*dev
= netdev_priv(ndev
);
1150 DBG(dev
, "open" NL
);
1152 /* Setup error IRQ handler */
1153 err
= request_irq(dev
->emac_irq
, emac_irq
, 0, "EMAC", dev
);
1155 printk(KERN_ERR
"%s: failed to request IRQ %d\n",
1156 ndev
->name
, dev
->emac_irq
);
1160 /* Allocate RX ring */
1161 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1162 if (emac_alloc_rx_skb(dev
, i
, GFP_KERNEL
)) {
1163 printk(KERN_ERR
"%s: failed to allocate RX ring\n",
1168 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= dev
->rx_slot
= 0;
1169 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1170 dev
->rx_sg_skb
= NULL
;
1172 mutex_lock(&dev
->link_lock
);
1175 /* Start PHY polling now.
1177 if (dev
->phy
.address
>= 0) {
1178 int link_poll_interval
;
1179 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1180 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1181 emac_rx_clk_default(dev
);
1182 netif_carrier_on(dev
->ndev
);
1183 link_poll_interval
= PHY_POLL_LINK_ON
;
1185 emac_rx_clk_tx(dev
);
1186 netif_carrier_off(dev
->ndev
);
1187 link_poll_interval
= PHY_POLL_LINK_OFF
;
1189 dev
->link_polling
= 1;
1191 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1192 emac_print_link_status(dev
);
1194 netif_carrier_on(dev
->ndev
);
1196 /* Required for Pause packet support in EMAC */
1197 dev_mc_add_global(ndev
, default_mcast_addr
);
1199 emac_configure(dev
);
1200 mal_poll_add(dev
->mal
, &dev
->commac
);
1201 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1202 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(ndev
->mtu
));
1203 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1204 emac_tx_enable(dev
);
1205 emac_rx_enable(dev
);
1206 emac_netif_start(dev
);
1208 mutex_unlock(&dev
->link_lock
);
1212 emac_clean_rx_ring(dev
);
1213 free_irq(dev
->emac_irq
, dev
);
1220 static int emac_link_differs(struct emac_instance
*dev
)
1222 u32 r
= in_be32(&dev
->emacp
->mr1
);
1224 int duplex
= r
& EMAC_MR1_FDE
? DUPLEX_FULL
: DUPLEX_HALF
;
1225 int speed
, pause
, asym_pause
;
1227 if (r
& EMAC_MR1_MF_1000
)
1229 else if (r
& EMAC_MR1_MF_100
)
1234 switch (r
& (EMAC_MR1_EIFC
| EMAC_MR1_APP
)) {
1235 case (EMAC_MR1_EIFC
| EMAC_MR1_APP
):
1244 pause
= asym_pause
= 0;
1246 return speed
!= dev
->phy
.speed
|| duplex
!= dev
->phy
.duplex
||
1247 pause
!= dev
->phy
.pause
|| asym_pause
!= dev
->phy
.asym_pause
;
1251 static void emac_link_timer(struct work_struct
*work
)
1253 struct emac_instance
*dev
=
1254 container_of(to_delayed_work(work
),
1255 struct emac_instance
, link_work
);
1256 int link_poll_interval
;
1258 mutex_lock(&dev
->link_lock
);
1259 DBG2(dev
, "link timer" NL
);
1264 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1265 if (!netif_carrier_ok(dev
->ndev
)) {
1266 emac_rx_clk_default(dev
);
1267 /* Get new link parameters */
1268 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1270 netif_carrier_on(dev
->ndev
);
1271 emac_netif_stop(dev
);
1272 emac_full_tx_reset(dev
);
1273 emac_netif_start(dev
);
1274 emac_print_link_status(dev
);
1276 link_poll_interval
= PHY_POLL_LINK_ON
;
1278 if (netif_carrier_ok(dev
->ndev
)) {
1279 emac_rx_clk_tx(dev
);
1280 netif_carrier_off(dev
->ndev
);
1281 netif_tx_disable(dev
->ndev
);
1282 emac_reinitialize(dev
);
1283 emac_print_link_status(dev
);
1285 link_poll_interval
= PHY_POLL_LINK_OFF
;
1287 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1289 mutex_unlock(&dev
->link_lock
);
1292 static void emac_force_link_update(struct emac_instance
*dev
)
1294 netif_carrier_off(dev
->ndev
);
1296 if (dev
->link_polling
) {
1297 cancel_delayed_work_sync(&dev
->link_work
);
1298 if (dev
->link_polling
)
1299 schedule_delayed_work(&dev
->link_work
, PHY_POLL_LINK_OFF
);
1303 /* Process ctx, rtnl_lock semaphore */
1304 static int emac_close(struct net_device
*ndev
)
1306 struct emac_instance
*dev
= netdev_priv(ndev
);
1308 DBG(dev
, "close" NL
);
1310 if (dev
->phy
.address
>= 0) {
1311 dev
->link_polling
= 0;
1312 cancel_delayed_work_sync(&dev
->link_work
);
1314 mutex_lock(&dev
->link_lock
);
1315 emac_netif_stop(dev
);
1317 mutex_unlock(&dev
->link_lock
);
1319 emac_rx_disable(dev
);
1320 emac_tx_disable(dev
);
1321 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1322 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1323 mal_poll_del(dev
->mal
, &dev
->commac
);
1325 emac_clean_tx_ring(dev
);
1326 emac_clean_rx_ring(dev
);
1328 free_irq(dev
->emac_irq
, dev
);
1330 netif_carrier_off(ndev
);
1335 static inline u16
emac_tx_csum(struct emac_instance
*dev
,
1336 struct sk_buff
*skb
)
1338 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
) &&
1339 (skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
1340 ++dev
->stats
.tx_packets_csum
;
1341 return EMAC_TX_CTRL_TAH_CSUM
;
1346 static inline int emac_xmit_finish(struct emac_instance
*dev
, int len
)
1348 struct emac_regs __iomem
*p
= dev
->emacp
;
1349 struct net_device
*ndev
= dev
->ndev
;
1351 /* Send the packet out. If the if makes a significant perf
1352 * difference, then we can store the TMR0 value in "dev"
1355 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
1356 out_be32(&p
->tmr0
, EMAC4_TMR0_XMIT
);
1358 out_be32(&p
->tmr0
, EMAC_TMR0_XMIT
);
1360 if (unlikely(++dev
->tx_cnt
== NUM_TX_BUFF
)) {
1361 netif_stop_queue(ndev
);
1362 DBG2(dev
, "stopped TX queue" NL
);
1365 ndev
->trans_start
= jiffies
;
1366 ++dev
->stats
.tx_packets
;
1367 dev
->stats
.tx_bytes
+= len
;
1369 return NETDEV_TX_OK
;
1373 static int emac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1375 struct emac_instance
*dev
= netdev_priv(ndev
);
1376 unsigned int len
= skb
->len
;
1379 u16 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1380 MAL_TX_CTRL_LAST
| emac_tx_csum(dev
, skb
);
1382 slot
= dev
->tx_slot
++;
1383 if (dev
->tx_slot
== NUM_TX_BUFF
) {
1385 ctrl
|= MAL_TX_CTRL_WRAP
;
1388 DBG2(dev
, "xmit(%u) %d" NL
, len
, slot
);
1390 dev
->tx_skb
[slot
] = skb
;
1391 dev
->tx_desc
[slot
].data_ptr
= dma_map_single(&dev
->ofdev
->dev
,
1394 dev
->tx_desc
[slot
].data_len
= (u16
) len
;
1396 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1398 return emac_xmit_finish(dev
, len
);
1401 static inline int emac_xmit_split(struct emac_instance
*dev
, int slot
,
1402 u32 pd
, int len
, int last
, u16 base_ctrl
)
1405 u16 ctrl
= base_ctrl
;
1406 int chunk
= min(len
, MAL_MAX_TX_SIZE
);
1409 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1412 ctrl
|= MAL_TX_CTRL_LAST
;
1413 if (slot
== NUM_TX_BUFF
- 1)
1414 ctrl
|= MAL_TX_CTRL_WRAP
;
1416 dev
->tx_skb
[slot
] = NULL
;
1417 dev
->tx_desc
[slot
].data_ptr
= pd
;
1418 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1419 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1430 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1431 static int emac_start_xmit_sg(struct sk_buff
*skb
, struct net_device
*ndev
)
1433 struct emac_instance
*dev
= netdev_priv(ndev
);
1434 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
1435 int len
= skb
->len
, chunk
;
1440 /* This is common "fast" path */
1441 if (likely(!nr_frags
&& len
<= MAL_MAX_TX_SIZE
))
1442 return emac_start_xmit(skb
, ndev
);
1444 len
-= skb
->data_len
;
1446 /* Note, this is only an *estimation*, we can still run out of empty
1447 * slots because of the additional fragmentation into
1448 * MAL_MAX_TX_SIZE-sized chunks
1450 if (unlikely(dev
->tx_cnt
+ nr_frags
+ mal_tx_chunks(len
) > NUM_TX_BUFF
))
1453 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1454 emac_tx_csum(dev
, skb
);
1455 slot
= dev
->tx_slot
;
1458 dev
->tx_skb
[slot
] = NULL
;
1459 chunk
= min(len
, MAL_MAX_TX_SIZE
);
1460 dev
->tx_desc
[slot
].data_ptr
= pd
=
1461 dma_map_single(&dev
->ofdev
->dev
, skb
->data
, len
, DMA_TO_DEVICE
);
1462 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1465 slot
= emac_xmit_split(dev
, slot
, pd
+ chunk
, len
, !nr_frags
,
1468 for (i
= 0; i
< nr_frags
; ++i
) {
1469 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
1470 len
= skb_frag_size(frag
);
1472 if (unlikely(dev
->tx_cnt
+ mal_tx_chunks(len
) >= NUM_TX_BUFF
))
1475 pd
= skb_frag_dma_map(&dev
->ofdev
->dev
, frag
, 0, len
,
1478 slot
= emac_xmit_split(dev
, slot
, pd
, len
, i
== nr_frags
- 1,
1482 DBG2(dev
, "xmit_sg(%u) %d - %d" NL
, skb
->len
, dev
->tx_slot
, slot
);
1484 /* Attach skb to the last slot so we don't release it too early */
1485 dev
->tx_skb
[slot
] = skb
;
1487 /* Send the packet out */
1488 if (dev
->tx_slot
== NUM_TX_BUFF
- 1)
1489 ctrl
|= MAL_TX_CTRL_WRAP
;
1491 dev
->tx_desc
[dev
->tx_slot
].ctrl
= ctrl
;
1492 dev
->tx_slot
= (slot
+ 1) % NUM_TX_BUFF
;
1494 return emac_xmit_finish(dev
, skb
->len
);
1497 /* Well, too bad. Our previous estimation was overly optimistic.
1500 while (slot
!= dev
->tx_slot
) {
1501 dev
->tx_desc
[slot
].ctrl
= 0;
1504 slot
= NUM_TX_BUFF
- 1;
1506 ++dev
->estats
.tx_undo
;
1509 netif_stop_queue(ndev
);
1510 DBG2(dev
, "stopped TX queue" NL
);
1511 return NETDEV_TX_BUSY
;
1515 static void emac_parse_tx_error(struct emac_instance
*dev
, u16 ctrl
)
1517 struct emac_error_stats
*st
= &dev
->estats
;
1519 DBG(dev
, "BD TX error %04x" NL
, ctrl
);
1522 if (ctrl
& EMAC_TX_ST_BFCS
)
1523 ++st
->tx_bd_bad_fcs
;
1524 if (ctrl
& EMAC_TX_ST_LCS
)
1525 ++st
->tx_bd_carrier_loss
;
1526 if (ctrl
& EMAC_TX_ST_ED
)
1527 ++st
->tx_bd_excessive_deferral
;
1528 if (ctrl
& EMAC_TX_ST_EC
)
1529 ++st
->tx_bd_excessive_collisions
;
1530 if (ctrl
& EMAC_TX_ST_LC
)
1531 ++st
->tx_bd_late_collision
;
1532 if (ctrl
& EMAC_TX_ST_MC
)
1533 ++st
->tx_bd_multple_collisions
;
1534 if (ctrl
& EMAC_TX_ST_SC
)
1535 ++st
->tx_bd_single_collision
;
1536 if (ctrl
& EMAC_TX_ST_UR
)
1537 ++st
->tx_bd_underrun
;
1538 if (ctrl
& EMAC_TX_ST_SQE
)
1542 static void emac_poll_tx(void *param
)
1544 struct emac_instance
*dev
= param
;
1547 DBG2(dev
, "poll_tx, %d %d" NL
, dev
->tx_cnt
, dev
->ack_slot
);
1549 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
1550 bad_mask
= EMAC_IS_BAD_TX_TAH
;
1552 bad_mask
= EMAC_IS_BAD_TX
;
1554 netif_tx_lock_bh(dev
->ndev
);
1557 int slot
= dev
->ack_slot
, n
= 0;
1559 ctrl
= dev
->tx_desc
[slot
].ctrl
;
1560 if (!(ctrl
& MAL_TX_CTRL_READY
)) {
1561 struct sk_buff
*skb
= dev
->tx_skb
[slot
];
1566 dev
->tx_skb
[slot
] = NULL
;
1568 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1570 if (unlikely(ctrl
& bad_mask
))
1571 emac_parse_tx_error(dev
, ctrl
);
1577 dev
->ack_slot
= slot
;
1578 if (netif_queue_stopped(dev
->ndev
) &&
1579 dev
->tx_cnt
< EMAC_TX_WAKEUP_THRESH
)
1580 netif_wake_queue(dev
->ndev
);
1582 DBG2(dev
, "tx %d pkts" NL
, n
);
1585 netif_tx_unlock_bh(dev
->ndev
);
1588 static inline void emac_recycle_rx_skb(struct emac_instance
*dev
, int slot
,
1591 struct sk_buff
*skb
= dev
->rx_skb
[slot
];
1593 DBG2(dev
, "recycle %d %d" NL
, slot
, len
);
1596 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2,
1597 EMAC_DMA_ALIGN(len
+ 2), DMA_FROM_DEVICE
);
1599 dev
->rx_desc
[slot
].data_len
= 0;
1601 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1602 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1605 static void emac_parse_rx_error(struct emac_instance
*dev
, u16 ctrl
)
1607 struct emac_error_stats
*st
= &dev
->estats
;
1609 DBG(dev
, "BD RX error %04x" NL
, ctrl
);
1612 if (ctrl
& EMAC_RX_ST_OE
)
1613 ++st
->rx_bd_overrun
;
1614 if (ctrl
& EMAC_RX_ST_BP
)
1615 ++st
->rx_bd_bad_packet
;
1616 if (ctrl
& EMAC_RX_ST_RP
)
1617 ++st
->rx_bd_runt_packet
;
1618 if (ctrl
& EMAC_RX_ST_SE
)
1619 ++st
->rx_bd_short_event
;
1620 if (ctrl
& EMAC_RX_ST_AE
)
1621 ++st
->rx_bd_alignment_error
;
1622 if (ctrl
& EMAC_RX_ST_BFCS
)
1623 ++st
->rx_bd_bad_fcs
;
1624 if (ctrl
& EMAC_RX_ST_PTL
)
1625 ++st
->rx_bd_packet_too_long
;
1626 if (ctrl
& EMAC_RX_ST_ORE
)
1627 ++st
->rx_bd_out_of_range
;
1628 if (ctrl
& EMAC_RX_ST_IRE
)
1629 ++st
->rx_bd_in_range
;
1632 static inline void emac_rx_csum(struct emac_instance
*dev
,
1633 struct sk_buff
*skb
, u16 ctrl
)
1635 #ifdef CONFIG_IBM_EMAC_TAH
1636 if (!ctrl
&& dev
->tah_dev
) {
1637 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1638 ++dev
->stats
.rx_packets_csum
;
1643 static inline int emac_rx_sg_append(struct emac_instance
*dev
, int slot
)
1645 if (likely(dev
->rx_sg_skb
!= NULL
)) {
1646 int len
= dev
->rx_desc
[slot
].data_len
;
1647 int tot_len
= dev
->rx_sg_skb
->len
+ len
;
1649 if (unlikely(tot_len
+ 2 > dev
->rx_skb_size
)) {
1650 ++dev
->estats
.rx_dropped_mtu
;
1651 dev_kfree_skb(dev
->rx_sg_skb
);
1652 dev
->rx_sg_skb
= NULL
;
1654 cacheable_memcpy(skb_tail_pointer(dev
->rx_sg_skb
),
1655 dev
->rx_skb
[slot
]->data
, len
);
1656 skb_put(dev
->rx_sg_skb
, len
);
1657 emac_recycle_rx_skb(dev
, slot
, len
);
1661 emac_recycle_rx_skb(dev
, slot
, 0);
1665 /* NAPI poll context */
1666 static int emac_poll_rx(void *param
, int budget
)
1668 struct emac_instance
*dev
= param
;
1669 int slot
= dev
->rx_slot
, received
= 0;
1671 DBG2(dev
, "poll_rx(%d)" NL
, budget
);
1674 while (budget
> 0) {
1676 struct sk_buff
*skb
;
1677 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1679 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1682 skb
= dev
->rx_skb
[slot
];
1684 len
= dev
->rx_desc
[slot
].data_len
;
1686 if (unlikely(!MAL_IS_SINGLE_RX(ctrl
)))
1689 ctrl
&= EMAC_BAD_RX_MASK
;
1690 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1691 emac_parse_rx_error(dev
, ctrl
);
1692 ++dev
->estats
.rx_dropped_error
;
1693 emac_recycle_rx_skb(dev
, slot
, 0);
1698 if (len
< ETH_HLEN
) {
1699 ++dev
->estats
.rx_dropped_stack
;
1700 emac_recycle_rx_skb(dev
, slot
, len
);
1704 if (len
&& len
< EMAC_RX_COPY_THRESH
) {
1705 struct sk_buff
*copy_skb
=
1706 alloc_skb(len
+ EMAC_RX_SKB_HEADROOM
+ 2, GFP_ATOMIC
);
1707 if (unlikely(!copy_skb
))
1710 skb_reserve(copy_skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1711 cacheable_memcpy(copy_skb
->data
- 2, skb
->data
- 2,
1713 emac_recycle_rx_skb(dev
, slot
, len
);
1715 } else if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
)))
1720 skb
->protocol
= eth_type_trans(skb
, dev
->ndev
);
1721 emac_rx_csum(dev
, skb
, ctrl
);
1723 if (unlikely(netif_receive_skb(skb
) == NET_RX_DROP
))
1724 ++dev
->estats
.rx_dropped_stack
;
1726 ++dev
->stats
.rx_packets
;
1728 dev
->stats
.rx_bytes
+= len
;
1729 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1734 if (ctrl
& MAL_RX_CTRL_FIRST
) {
1735 BUG_ON(dev
->rx_sg_skb
);
1736 if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
))) {
1737 DBG(dev
, "rx OOM %d" NL
, slot
);
1738 ++dev
->estats
.rx_dropped_oom
;
1739 emac_recycle_rx_skb(dev
, slot
, 0);
1741 dev
->rx_sg_skb
= skb
;
1744 } else if (!emac_rx_sg_append(dev
, slot
) &&
1745 (ctrl
& MAL_RX_CTRL_LAST
)) {
1747 skb
= dev
->rx_sg_skb
;
1748 dev
->rx_sg_skb
= NULL
;
1750 ctrl
&= EMAC_BAD_RX_MASK
;
1751 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1752 emac_parse_rx_error(dev
, ctrl
);
1753 ++dev
->estats
.rx_dropped_error
;
1761 DBG(dev
, "rx OOM %d" NL
, slot
);
1762 /* Drop the packet and recycle skb */
1763 ++dev
->estats
.rx_dropped_oom
;
1764 emac_recycle_rx_skb(dev
, slot
, 0);
1769 DBG2(dev
, "rx %d BDs" NL
, received
);
1770 dev
->rx_slot
= slot
;
1773 if (unlikely(budget
&& test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
))) {
1775 if (!(dev
->rx_desc
[slot
].ctrl
& MAL_RX_CTRL_EMPTY
)) {
1776 DBG2(dev
, "rx restart" NL
);
1781 if (dev
->rx_sg_skb
) {
1782 DBG2(dev
, "dropping partial rx packet" NL
);
1783 ++dev
->estats
.rx_dropped_error
;
1784 dev_kfree_skb(dev
->rx_sg_skb
);
1785 dev
->rx_sg_skb
= NULL
;
1788 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1789 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1790 emac_rx_enable(dev
);
1796 /* NAPI poll context */
1797 static int emac_peek_rx(void *param
)
1799 struct emac_instance
*dev
= param
;
1801 return !(dev
->rx_desc
[dev
->rx_slot
].ctrl
& MAL_RX_CTRL_EMPTY
);
1804 /* NAPI poll context */
1805 static int emac_peek_rx_sg(void *param
)
1807 struct emac_instance
*dev
= param
;
1809 int slot
= dev
->rx_slot
;
1811 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1812 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1814 else if (ctrl
& MAL_RX_CTRL_LAST
)
1817 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1819 /* I'm just being paranoid here :) */
1820 if (unlikely(slot
== dev
->rx_slot
))
1826 static void emac_rxde(void *param
)
1828 struct emac_instance
*dev
= param
;
1830 ++dev
->estats
.rx_stopped
;
1831 emac_rx_disable_async(dev
);
1835 static irqreturn_t
emac_irq(int irq
, void *dev_instance
)
1837 struct emac_instance
*dev
= dev_instance
;
1838 struct emac_regs __iomem
*p
= dev
->emacp
;
1839 struct emac_error_stats
*st
= &dev
->estats
;
1842 spin_lock(&dev
->lock
);
1844 isr
= in_be32(&p
->isr
);
1845 out_be32(&p
->isr
, isr
);
1847 DBG(dev
, "isr = %08x" NL
, isr
);
1849 if (isr
& EMAC4_ISR_TXPE
)
1851 if (isr
& EMAC4_ISR_RXPE
)
1853 if (isr
& EMAC4_ISR_TXUE
)
1855 if (isr
& EMAC4_ISR_RXOE
)
1856 ++st
->rx_fifo_overrun
;
1857 if (isr
& EMAC_ISR_OVR
)
1859 if (isr
& EMAC_ISR_BP
)
1860 ++st
->rx_bad_packet
;
1861 if (isr
& EMAC_ISR_RP
)
1862 ++st
->rx_runt_packet
;
1863 if (isr
& EMAC_ISR_SE
)
1864 ++st
->rx_short_event
;
1865 if (isr
& EMAC_ISR_ALE
)
1866 ++st
->rx_alignment_error
;
1867 if (isr
& EMAC_ISR_BFCS
)
1869 if (isr
& EMAC_ISR_PTLE
)
1870 ++st
->rx_packet_too_long
;
1871 if (isr
& EMAC_ISR_ORE
)
1872 ++st
->rx_out_of_range
;
1873 if (isr
& EMAC_ISR_IRE
)
1875 if (isr
& EMAC_ISR_SQE
)
1877 if (isr
& EMAC_ISR_TE
)
1880 spin_unlock(&dev
->lock
);
1885 static struct net_device_stats
*emac_stats(struct net_device
*ndev
)
1887 struct emac_instance
*dev
= netdev_priv(ndev
);
1888 struct emac_stats
*st
= &dev
->stats
;
1889 struct emac_error_stats
*est
= &dev
->estats
;
1890 struct net_device_stats
*nst
= &dev
->nstats
;
1891 unsigned long flags
;
1893 DBG2(dev
, "stats" NL
);
1895 /* Compute "legacy" statistics */
1896 spin_lock_irqsave(&dev
->lock
, flags
);
1897 nst
->rx_packets
= (unsigned long)st
->rx_packets
;
1898 nst
->rx_bytes
= (unsigned long)st
->rx_bytes
;
1899 nst
->tx_packets
= (unsigned long)st
->tx_packets
;
1900 nst
->tx_bytes
= (unsigned long)st
->tx_bytes
;
1901 nst
->rx_dropped
= (unsigned long)(est
->rx_dropped_oom
+
1902 est
->rx_dropped_error
+
1903 est
->rx_dropped_resize
+
1904 est
->rx_dropped_mtu
);
1905 nst
->tx_dropped
= (unsigned long)est
->tx_dropped
;
1907 nst
->rx_errors
= (unsigned long)est
->rx_bd_errors
;
1908 nst
->rx_fifo_errors
= (unsigned long)(est
->rx_bd_overrun
+
1909 est
->rx_fifo_overrun
+
1911 nst
->rx_frame_errors
= (unsigned long)(est
->rx_bd_alignment_error
+
1912 est
->rx_alignment_error
);
1913 nst
->rx_crc_errors
= (unsigned long)(est
->rx_bd_bad_fcs
+
1915 nst
->rx_length_errors
= (unsigned long)(est
->rx_bd_runt_packet
+
1916 est
->rx_bd_short_event
+
1917 est
->rx_bd_packet_too_long
+
1918 est
->rx_bd_out_of_range
+
1919 est
->rx_bd_in_range
+
1920 est
->rx_runt_packet
+
1921 est
->rx_short_event
+
1922 est
->rx_packet_too_long
+
1923 est
->rx_out_of_range
+
1926 nst
->tx_errors
= (unsigned long)(est
->tx_bd_errors
+ est
->tx_errors
);
1927 nst
->tx_fifo_errors
= (unsigned long)(est
->tx_bd_underrun
+
1929 nst
->tx_carrier_errors
= (unsigned long)est
->tx_bd_carrier_loss
;
1930 nst
->collisions
= (unsigned long)(est
->tx_bd_excessive_deferral
+
1931 est
->tx_bd_excessive_collisions
+
1932 est
->tx_bd_late_collision
+
1933 est
->tx_bd_multple_collisions
);
1934 spin_unlock_irqrestore(&dev
->lock
, flags
);
1938 static struct mal_commac_ops emac_commac_ops
= {
1939 .poll_tx
= &emac_poll_tx
,
1940 .poll_rx
= &emac_poll_rx
,
1941 .peek_rx
= &emac_peek_rx
,
1945 static struct mal_commac_ops emac_commac_sg_ops
= {
1946 .poll_tx
= &emac_poll_tx
,
1947 .poll_rx
= &emac_poll_rx
,
1948 .peek_rx
= &emac_peek_rx_sg
,
1952 /* Ethtool support */
1953 static int emac_ethtool_get_settings(struct net_device
*ndev
,
1954 struct ethtool_cmd
*cmd
)
1956 struct emac_instance
*dev
= netdev_priv(ndev
);
1958 cmd
->supported
= dev
->phy
.features
;
1959 cmd
->port
= PORT_MII
;
1960 cmd
->phy_address
= dev
->phy
.address
;
1962 dev
->phy
.address
>= 0 ? XCVR_EXTERNAL
: XCVR_INTERNAL
;
1964 mutex_lock(&dev
->link_lock
);
1965 cmd
->advertising
= dev
->phy
.advertising
;
1966 cmd
->autoneg
= dev
->phy
.autoneg
;
1967 cmd
->speed
= dev
->phy
.speed
;
1968 cmd
->duplex
= dev
->phy
.duplex
;
1969 mutex_unlock(&dev
->link_lock
);
1974 static int emac_ethtool_set_settings(struct net_device
*ndev
,
1975 struct ethtool_cmd
*cmd
)
1977 struct emac_instance
*dev
= netdev_priv(ndev
);
1978 u32 f
= dev
->phy
.features
;
1980 DBG(dev
, "set_settings(%d, %d, %d, 0x%08x)" NL
,
1981 cmd
->autoneg
, cmd
->speed
, cmd
->duplex
, cmd
->advertising
);
1983 /* Basic sanity checks */
1984 if (dev
->phy
.address
< 0)
1986 if (cmd
->autoneg
!= AUTONEG_ENABLE
&& cmd
->autoneg
!= AUTONEG_DISABLE
)
1988 if (cmd
->autoneg
== AUTONEG_ENABLE
&& cmd
->advertising
== 0)
1990 if (cmd
->duplex
!= DUPLEX_HALF
&& cmd
->duplex
!= DUPLEX_FULL
)
1993 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1994 switch (cmd
->speed
) {
1996 if (cmd
->duplex
== DUPLEX_HALF
&&
1997 !(f
& SUPPORTED_10baseT_Half
))
1999 if (cmd
->duplex
== DUPLEX_FULL
&&
2000 !(f
& SUPPORTED_10baseT_Full
))
2004 if (cmd
->duplex
== DUPLEX_HALF
&&
2005 !(f
& SUPPORTED_100baseT_Half
))
2007 if (cmd
->duplex
== DUPLEX_FULL
&&
2008 !(f
& SUPPORTED_100baseT_Full
))
2012 if (cmd
->duplex
== DUPLEX_HALF
&&
2013 !(f
& SUPPORTED_1000baseT_Half
))
2015 if (cmd
->duplex
== DUPLEX_FULL
&&
2016 !(f
& SUPPORTED_1000baseT_Full
))
2023 mutex_lock(&dev
->link_lock
);
2024 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, cmd
->speed
,
2026 mutex_unlock(&dev
->link_lock
);
2029 if (!(f
& SUPPORTED_Autoneg
))
2032 mutex_lock(&dev
->link_lock
);
2033 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
,
2034 (cmd
->advertising
& f
) |
2035 (dev
->phy
.advertising
&
2037 ADVERTISED_Asym_Pause
)));
2038 mutex_unlock(&dev
->link_lock
);
2040 emac_force_link_update(dev
);
2045 static void emac_ethtool_get_ringparam(struct net_device
*ndev
,
2046 struct ethtool_ringparam
*rp
)
2048 rp
->rx_max_pending
= rp
->rx_pending
= NUM_RX_BUFF
;
2049 rp
->tx_max_pending
= rp
->tx_pending
= NUM_TX_BUFF
;
2052 static void emac_ethtool_get_pauseparam(struct net_device
*ndev
,
2053 struct ethtool_pauseparam
*pp
)
2055 struct emac_instance
*dev
= netdev_priv(ndev
);
2057 mutex_lock(&dev
->link_lock
);
2058 if ((dev
->phy
.features
& SUPPORTED_Autoneg
) &&
2059 (dev
->phy
.advertising
& (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
)))
2062 if (dev
->phy
.duplex
== DUPLEX_FULL
) {
2064 pp
->rx_pause
= pp
->tx_pause
= 1;
2065 else if (dev
->phy
.asym_pause
)
2068 mutex_unlock(&dev
->link_lock
);
2071 static int emac_get_regs_len(struct emac_instance
*dev
)
2073 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
2074 return sizeof(struct emac_ethtool_regs_subhdr
) +
2075 EMAC4_ETHTOOL_REGS_SIZE(dev
);
2077 return sizeof(struct emac_ethtool_regs_subhdr
) +
2078 EMAC_ETHTOOL_REGS_SIZE(dev
);
2081 static int emac_ethtool_get_regs_len(struct net_device
*ndev
)
2083 struct emac_instance
*dev
= netdev_priv(ndev
);
2086 size
= sizeof(struct emac_ethtool_regs_hdr
) +
2087 emac_get_regs_len(dev
) + mal_get_regs_len(dev
->mal
);
2088 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2089 size
+= zmii_get_regs_len(dev
->zmii_dev
);
2090 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2091 size
+= rgmii_get_regs_len(dev
->rgmii_dev
);
2092 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2093 size
+= tah_get_regs_len(dev
->tah_dev
);
2098 static void *emac_dump_regs(struct emac_instance
*dev
, void *buf
)
2100 struct emac_ethtool_regs_subhdr
*hdr
= buf
;
2102 hdr
->index
= dev
->cell_index
;
2103 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
)) {
2104 hdr
->version
= EMAC4_ETHTOOL_REGS_VER
;
2105 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC4_ETHTOOL_REGS_SIZE(dev
));
2106 return (void *)(hdr
+ 1) + EMAC4_ETHTOOL_REGS_SIZE(dev
);
2108 hdr
->version
= EMAC_ETHTOOL_REGS_VER
;
2109 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC_ETHTOOL_REGS_SIZE(dev
));
2110 return (void *)(hdr
+ 1) + EMAC_ETHTOOL_REGS_SIZE(dev
);
2114 static void emac_ethtool_get_regs(struct net_device
*ndev
,
2115 struct ethtool_regs
*regs
, void *buf
)
2117 struct emac_instance
*dev
= netdev_priv(ndev
);
2118 struct emac_ethtool_regs_hdr
*hdr
= buf
;
2120 hdr
->components
= 0;
2123 buf
= mal_dump_regs(dev
->mal
, buf
);
2124 buf
= emac_dump_regs(dev
, buf
);
2125 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
)) {
2126 hdr
->components
|= EMAC_ETHTOOL_REGS_ZMII
;
2127 buf
= zmii_dump_regs(dev
->zmii_dev
, buf
);
2129 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
)) {
2130 hdr
->components
|= EMAC_ETHTOOL_REGS_RGMII
;
2131 buf
= rgmii_dump_regs(dev
->rgmii_dev
, buf
);
2133 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
)) {
2134 hdr
->components
|= EMAC_ETHTOOL_REGS_TAH
;
2135 buf
= tah_dump_regs(dev
->tah_dev
, buf
);
2139 static int emac_ethtool_nway_reset(struct net_device
*ndev
)
2141 struct emac_instance
*dev
= netdev_priv(ndev
);
2144 DBG(dev
, "nway_reset" NL
);
2146 if (dev
->phy
.address
< 0)
2149 mutex_lock(&dev
->link_lock
);
2150 if (!dev
->phy
.autoneg
) {
2155 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, dev
->phy
.advertising
);
2157 mutex_unlock(&dev
->link_lock
);
2158 emac_force_link_update(dev
);
2162 static int emac_ethtool_get_sset_count(struct net_device
*ndev
, int stringset
)
2164 if (stringset
== ETH_SS_STATS
)
2165 return EMAC_ETHTOOL_STATS_COUNT
;
2170 static void emac_ethtool_get_strings(struct net_device
*ndev
, u32 stringset
,
2173 if (stringset
== ETH_SS_STATS
)
2174 memcpy(buf
, &emac_stats_keys
, sizeof(emac_stats_keys
));
2177 static void emac_ethtool_get_ethtool_stats(struct net_device
*ndev
,
2178 struct ethtool_stats
*estats
,
2181 struct emac_instance
*dev
= netdev_priv(ndev
);
2183 memcpy(tmp_stats
, &dev
->stats
, sizeof(dev
->stats
));
2184 tmp_stats
+= sizeof(dev
->stats
) / sizeof(u64
);
2185 memcpy(tmp_stats
, &dev
->estats
, sizeof(dev
->estats
));
2188 static void emac_ethtool_get_drvinfo(struct net_device
*ndev
,
2189 struct ethtool_drvinfo
*info
)
2191 struct emac_instance
*dev
= netdev_priv(ndev
);
2193 strcpy(info
->driver
, "ibm_emac");
2194 strcpy(info
->version
, DRV_VERSION
);
2195 info
->fw_version
[0] = '\0';
2196 sprintf(info
->bus_info
, "PPC 4xx EMAC-%d %s",
2197 dev
->cell_index
, dev
->ofdev
->dev
.of_node
->full_name
);
2198 info
->regdump_len
= emac_ethtool_get_regs_len(ndev
);
2201 static const struct ethtool_ops emac_ethtool_ops
= {
2202 .get_settings
= emac_ethtool_get_settings
,
2203 .set_settings
= emac_ethtool_set_settings
,
2204 .get_drvinfo
= emac_ethtool_get_drvinfo
,
2206 .get_regs_len
= emac_ethtool_get_regs_len
,
2207 .get_regs
= emac_ethtool_get_regs
,
2209 .nway_reset
= emac_ethtool_nway_reset
,
2211 .get_ringparam
= emac_ethtool_get_ringparam
,
2212 .get_pauseparam
= emac_ethtool_get_pauseparam
,
2214 .get_strings
= emac_ethtool_get_strings
,
2215 .get_sset_count
= emac_ethtool_get_sset_count
,
2216 .get_ethtool_stats
= emac_ethtool_get_ethtool_stats
,
2218 .get_link
= ethtool_op_get_link
,
2221 static int emac_ioctl(struct net_device
*ndev
, struct ifreq
*rq
, int cmd
)
2223 struct emac_instance
*dev
= netdev_priv(ndev
);
2224 struct mii_ioctl_data
*data
= if_mii(rq
);
2226 DBG(dev
, "ioctl %08x" NL
, cmd
);
2228 if (dev
->phy
.address
< 0)
2233 data
->phy_id
= dev
->phy
.address
;
2236 data
->val_out
= emac_mdio_read(ndev
, dev
->phy
.address
,
2241 emac_mdio_write(ndev
, dev
->phy
.address
, data
->reg_num
,
2249 struct emac_depentry
{
2251 struct device_node
*node
;
2252 struct platform_device
*ofdev
;
2256 #define EMAC_DEP_MAL_IDX 0
2257 #define EMAC_DEP_ZMII_IDX 1
2258 #define EMAC_DEP_RGMII_IDX 2
2259 #define EMAC_DEP_TAH_IDX 3
2260 #define EMAC_DEP_MDIO_IDX 4
2261 #define EMAC_DEP_PREV_IDX 5
2262 #define EMAC_DEP_COUNT 6
2264 static int __devinit
emac_check_deps(struct emac_instance
*dev
,
2265 struct emac_depentry
*deps
)
2268 struct device_node
*np
;
2270 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2271 /* no dependency on that item, allright */
2272 if (deps
[i
].phandle
== 0) {
2276 /* special case for blist as the dependency might go away */
2277 if (i
== EMAC_DEP_PREV_IDX
) {
2278 np
= *(dev
->blist
- 1);
2280 deps
[i
].phandle
= 0;
2284 if (deps
[i
].node
== NULL
)
2285 deps
[i
].node
= of_node_get(np
);
2287 if (deps
[i
].node
== NULL
)
2288 deps
[i
].node
= of_find_node_by_phandle(deps
[i
].phandle
);
2289 if (deps
[i
].node
== NULL
)
2291 if (deps
[i
].ofdev
== NULL
)
2292 deps
[i
].ofdev
= of_find_device_by_node(deps
[i
].node
);
2293 if (deps
[i
].ofdev
== NULL
)
2295 if (deps
[i
].drvdata
== NULL
)
2296 deps
[i
].drvdata
= dev_get_drvdata(&deps
[i
].ofdev
->dev
);
2297 if (deps
[i
].drvdata
!= NULL
)
2300 return there
== EMAC_DEP_COUNT
;
2303 static void emac_put_deps(struct emac_instance
*dev
)
2306 of_dev_put(dev
->mal_dev
);
2308 of_dev_put(dev
->zmii_dev
);
2310 of_dev_put(dev
->rgmii_dev
);
2312 of_dev_put(dev
->mdio_dev
);
2314 of_dev_put(dev
->tah_dev
);
2317 static int __devinit
emac_of_bus_notify(struct notifier_block
*nb
,
2318 unsigned long action
, void *data
)
2320 /* We are only intereted in device addition */
2321 if (action
== BUS_NOTIFY_BOUND_DRIVER
)
2322 wake_up_all(&emac_probe_wait
);
2326 static struct notifier_block emac_of_bus_notifier __devinitdata
= {
2327 .notifier_call
= emac_of_bus_notify
2330 static int __devinit
emac_wait_deps(struct emac_instance
*dev
)
2332 struct emac_depentry deps
[EMAC_DEP_COUNT
];
2335 memset(&deps
, 0, sizeof(deps
));
2337 deps
[EMAC_DEP_MAL_IDX
].phandle
= dev
->mal_ph
;
2338 deps
[EMAC_DEP_ZMII_IDX
].phandle
= dev
->zmii_ph
;
2339 deps
[EMAC_DEP_RGMII_IDX
].phandle
= dev
->rgmii_ph
;
2341 deps
[EMAC_DEP_TAH_IDX
].phandle
= dev
->tah_ph
;
2343 deps
[EMAC_DEP_MDIO_IDX
].phandle
= dev
->mdio_ph
;
2344 if (dev
->blist
&& dev
->blist
> emac_boot_list
)
2345 deps
[EMAC_DEP_PREV_IDX
].phandle
= 0xffffffffu
;
2346 bus_register_notifier(&platform_bus_type
, &emac_of_bus_notifier
);
2347 wait_event_timeout(emac_probe_wait
,
2348 emac_check_deps(dev
, deps
),
2349 EMAC_PROBE_DEP_TIMEOUT
);
2350 bus_unregister_notifier(&platform_bus_type
, &emac_of_bus_notifier
);
2351 err
= emac_check_deps(dev
, deps
) ? 0 : -ENODEV
;
2352 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2354 of_node_put(deps
[i
].node
);
2355 if (err
&& deps
[i
].ofdev
)
2356 of_dev_put(deps
[i
].ofdev
);
2359 dev
->mal_dev
= deps
[EMAC_DEP_MAL_IDX
].ofdev
;
2360 dev
->zmii_dev
= deps
[EMAC_DEP_ZMII_IDX
].ofdev
;
2361 dev
->rgmii_dev
= deps
[EMAC_DEP_RGMII_IDX
].ofdev
;
2362 dev
->tah_dev
= deps
[EMAC_DEP_TAH_IDX
].ofdev
;
2363 dev
->mdio_dev
= deps
[EMAC_DEP_MDIO_IDX
].ofdev
;
2365 if (deps
[EMAC_DEP_PREV_IDX
].ofdev
)
2366 of_dev_put(deps
[EMAC_DEP_PREV_IDX
].ofdev
);
2370 static int __devinit
emac_read_uint_prop(struct device_node
*np
, const char *name
,
2371 u32
*val
, int fatal
)
2374 const u32
*prop
= of_get_property(np
, name
, &len
);
2375 if (prop
== NULL
|| len
< sizeof(u32
)) {
2377 printk(KERN_ERR
"%s: missing %s property\n",
2378 np
->full_name
, name
);
2385 static int __devinit
emac_init_phy(struct emac_instance
*dev
)
2387 struct device_node
*np
= dev
->ofdev
->dev
.of_node
;
2388 struct net_device
*ndev
= dev
->ndev
;
2392 dev
->phy
.dev
= ndev
;
2393 dev
->phy
.mode
= dev
->phy_mode
;
2395 /* PHY-less configuration.
2396 * XXX I probably should move these settings to the dev tree
2398 if (dev
->phy_address
== 0xffffffff && dev
->phy_map
== 0xffffffff) {
2401 /* PHY-less configuration.
2402 * XXX I probably should move these settings to the dev tree
2404 dev
->phy
.address
= -1;
2405 dev
->phy
.features
= SUPPORTED_MII
;
2406 if (emac_phy_supports_gige(dev
->phy_mode
))
2407 dev
->phy
.features
|= SUPPORTED_1000baseT_Full
;
2409 dev
->phy
.features
|= SUPPORTED_100baseT_Full
;
2415 mutex_lock(&emac_phy_map_lock
);
2416 phy_map
= dev
->phy_map
| busy_phy_map
;
2418 DBG(dev
, "PHY maps %08x %08x" NL
, dev
->phy_map
, busy_phy_map
);
2420 dev
->phy
.mdio_read
= emac_mdio_read
;
2421 dev
->phy
.mdio_write
= emac_mdio_write
;
2423 /* Enable internal clock source */
2424 #ifdef CONFIG_PPC_DCR_NATIVE
2425 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
))
2426 dcri_clrset(SDR0
, SDR0_MFR
, 0, SDR0_MFR_ECS
);
2428 /* PHY clock workaround */
2429 emac_rx_clk_tx(dev
);
2431 /* Enable internal clock source on 440GX*/
2432 #ifdef CONFIG_PPC_DCR_NATIVE
2433 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
))
2434 dcri_clrset(SDR0
, SDR0_MFR
, 0, SDR0_MFR_ECS
);
2436 /* Configure EMAC with defaults so we can at least use MDIO
2437 * This is needed mostly for 440GX
2439 if (emac_phy_gpcs(dev
->phy
.mode
)) {
2441 * Make GPCS PHY address equal to EMAC index.
2442 * We probably should take into account busy_phy_map
2443 * and/or phy_map here.
2445 * Note that the busy_phy_map is currently global
2446 * while it should probably be per-ASIC...
2448 dev
->phy
.gpcs_address
= dev
->gpcs_address
;
2449 if (dev
->phy
.gpcs_address
== 0xffffffff)
2450 dev
->phy
.address
= dev
->cell_index
;
2453 emac_configure(dev
);
2455 if (dev
->phy_address
!= 0xffffffff)
2456 phy_map
= ~(1 << dev
->phy_address
);
2458 for (i
= 0; i
< 0x20; phy_map
>>= 1, ++i
)
2459 if (!(phy_map
& 1)) {
2461 busy_phy_map
|= 1 << i
;
2463 /* Quick check if there is a PHY at the address */
2464 r
= emac_mdio_read(dev
->ndev
, i
, MII_BMCR
);
2465 if (r
== 0xffff || r
< 0)
2467 if (!emac_mii_phy_probe(&dev
->phy
, i
))
2471 /* Enable external clock source */
2472 #ifdef CONFIG_PPC_DCR_NATIVE
2473 if (emac_has_feature(dev
, EMAC_FTR_440GX_PHY_CLK_FIX
))
2474 dcri_clrset(SDR0
, SDR0_MFR
, SDR0_MFR_ECS
, 0);
2476 mutex_unlock(&emac_phy_map_lock
);
2478 printk(KERN_WARNING
"%s: can't find PHY!\n", np
->full_name
);
2483 if (dev
->phy
.def
->ops
->init
)
2484 dev
->phy
.def
->ops
->init(&dev
->phy
);
2486 /* Disable any PHY features not supported by the platform */
2487 dev
->phy
.def
->features
&= ~dev
->phy_feat_exc
;
2488 dev
->phy
.features
&= ~dev
->phy_feat_exc
;
2490 /* Setup initial link parameters */
2491 if (dev
->phy
.features
& SUPPORTED_Autoneg
) {
2492 adv
= dev
->phy
.features
;
2493 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
))
2494 adv
|= ADVERTISED_Pause
| ADVERTISED_Asym_Pause
;
2495 /* Restart autonegotiation */
2496 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, adv
);
2498 u32 f
= dev
->phy
.def
->features
;
2499 int speed
= SPEED_10
, fd
= DUPLEX_HALF
;
2501 /* Select highest supported speed/duplex */
2502 if (f
& SUPPORTED_1000baseT_Full
) {
2505 } else if (f
& SUPPORTED_1000baseT_Half
)
2507 else if (f
& SUPPORTED_100baseT_Full
) {
2510 } else if (f
& SUPPORTED_100baseT_Half
)
2512 else if (f
& SUPPORTED_10baseT_Full
)
2515 /* Force link parameters */
2516 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, speed
, fd
);
2521 static int __devinit
emac_init_config(struct emac_instance
*dev
)
2523 struct device_node
*np
= dev
->ofdev
->dev
.of_node
;
2526 /* Read config from device-tree */
2527 if (emac_read_uint_prop(np
, "mal-device", &dev
->mal_ph
, 1))
2529 if (emac_read_uint_prop(np
, "mal-tx-channel", &dev
->mal_tx_chan
, 1))
2531 if (emac_read_uint_prop(np
, "mal-rx-channel", &dev
->mal_rx_chan
, 1))
2533 if (emac_read_uint_prop(np
, "cell-index", &dev
->cell_index
, 1))
2535 if (emac_read_uint_prop(np
, "max-frame-size", &dev
->max_mtu
, 0))
2536 dev
->max_mtu
= 1500;
2537 if (emac_read_uint_prop(np
, "rx-fifo-size", &dev
->rx_fifo_size
, 0))
2538 dev
->rx_fifo_size
= 2048;
2539 if (emac_read_uint_prop(np
, "tx-fifo-size", &dev
->tx_fifo_size
, 0))
2540 dev
->tx_fifo_size
= 2048;
2541 if (emac_read_uint_prop(np
, "rx-fifo-size-gige", &dev
->rx_fifo_size_gige
, 0))
2542 dev
->rx_fifo_size_gige
= dev
->rx_fifo_size
;
2543 if (emac_read_uint_prop(np
, "tx-fifo-size-gige", &dev
->tx_fifo_size_gige
, 0))
2544 dev
->tx_fifo_size_gige
= dev
->tx_fifo_size
;
2545 if (emac_read_uint_prop(np
, "phy-address", &dev
->phy_address
, 0))
2546 dev
->phy_address
= 0xffffffff;
2547 if (emac_read_uint_prop(np
, "phy-map", &dev
->phy_map
, 0))
2548 dev
->phy_map
= 0xffffffff;
2549 if (emac_read_uint_prop(np
, "gpcs-address", &dev
->gpcs_address
, 0))
2550 dev
->gpcs_address
= 0xffffffff;
2551 if (emac_read_uint_prop(np
->parent
, "clock-frequency", &dev
->opb_bus_freq
, 1))
2553 if (emac_read_uint_prop(np
, "tah-device", &dev
->tah_ph
, 0))
2555 if (emac_read_uint_prop(np
, "tah-channel", &dev
->tah_port
, 0))
2557 if (emac_read_uint_prop(np
, "mdio-device", &dev
->mdio_ph
, 0))
2559 if (emac_read_uint_prop(np
, "zmii-device", &dev
->zmii_ph
, 0))
2561 if (emac_read_uint_prop(np
, "zmii-channel", &dev
->zmii_port
, 0))
2562 dev
->zmii_port
= 0xffffffff;
2563 if (emac_read_uint_prop(np
, "rgmii-device", &dev
->rgmii_ph
, 0))
2565 if (emac_read_uint_prop(np
, "rgmii-channel", &dev
->rgmii_port
, 0))
2566 dev
->rgmii_port
= 0xffffffff;
2567 if (emac_read_uint_prop(np
, "fifo-entry-size", &dev
->fifo_entry_size
, 0))
2568 dev
->fifo_entry_size
= 16;
2569 if (emac_read_uint_prop(np
, "mal-burst-size", &dev
->mal_burst_size
, 0))
2570 dev
->mal_burst_size
= 256;
2572 /* PHY mode needs some decoding */
2573 dev
->phy_mode
= of_get_phy_mode(np
);
2574 if (dev
->phy_mode
< 0)
2575 dev
->phy_mode
= PHY_MODE_NA
;
2577 /* Check EMAC version */
2578 if (of_device_is_compatible(np
, "ibm,emac4sync")) {
2579 dev
->features
|= (EMAC_FTR_EMAC4
| EMAC_FTR_EMAC4SYNC
);
2580 if (of_device_is_compatible(np
, "ibm,emac-460ex") ||
2581 of_device_is_compatible(np
, "ibm,emac-460gt"))
2582 dev
->features
|= EMAC_FTR_460EX_PHY_CLK_FIX
;
2583 if (of_device_is_compatible(np
, "ibm,emac-405ex") ||
2584 of_device_is_compatible(np
, "ibm,emac-405exr"))
2585 dev
->features
|= EMAC_FTR_440EP_PHY_CLK_FIX
;
2586 if (of_device_is_compatible(np
, "ibm,emac-apm821xx")) {
2587 dev
->features
|= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE
|
2588 EMAC_FTR_APM821XX_NO_HALF_DUPLEX
|
2589 EMAC_FTR_460EX_PHY_CLK_FIX
);
2591 } else if (of_device_is_compatible(np
, "ibm,emac4")) {
2592 dev
->features
|= EMAC_FTR_EMAC4
;
2593 if (of_device_is_compatible(np
, "ibm,emac-440gx"))
2594 dev
->features
|= EMAC_FTR_440GX_PHY_CLK_FIX
;
2596 if (of_device_is_compatible(np
, "ibm,emac-440ep") ||
2597 of_device_is_compatible(np
, "ibm,emac-440gr"))
2598 dev
->features
|= EMAC_FTR_440EP_PHY_CLK_FIX
;
2599 if (of_device_is_compatible(np
, "ibm,emac-405ez")) {
2600 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2601 dev
->features
|= EMAC_FTR_NO_FLOW_CONTROL_40x
;
2603 printk(KERN_ERR
"%s: Flow control not disabled!\n",
2611 /* Fixup some feature bits based on the device tree */
2612 if (of_get_property(np
, "has-inverted-stacr-oc", NULL
))
2613 dev
->features
|= EMAC_FTR_STACR_OC_INVERT
;
2614 if (of_get_property(np
, "has-new-stacr-staopc", NULL
))
2615 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
;
2617 /* CAB lacks the appropriate properties */
2618 if (of_device_is_compatible(np
, "ibm,emac-axon"))
2619 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
|
2620 EMAC_FTR_STACR_OC_INVERT
;
2622 /* Enable TAH/ZMII/RGMII features as found */
2623 if (dev
->tah_ph
!= 0) {
2624 #ifdef CONFIG_IBM_EMAC_TAH
2625 dev
->features
|= EMAC_FTR_HAS_TAH
;
2627 printk(KERN_ERR
"%s: TAH support not enabled !\n",
2633 if (dev
->zmii_ph
!= 0) {
2634 #ifdef CONFIG_IBM_EMAC_ZMII
2635 dev
->features
|= EMAC_FTR_HAS_ZMII
;
2637 printk(KERN_ERR
"%s: ZMII support not enabled !\n",
2643 if (dev
->rgmii_ph
!= 0) {
2644 #ifdef CONFIG_IBM_EMAC_RGMII
2645 dev
->features
|= EMAC_FTR_HAS_RGMII
;
2647 printk(KERN_ERR
"%s: RGMII support not enabled !\n",
2653 /* Read MAC-address */
2654 p
= of_get_property(np
, "local-mac-address", NULL
);
2656 printk(KERN_ERR
"%s: Can't find local-mac-address property\n",
2660 memcpy(dev
->ndev
->dev_addr
, p
, 6);
2662 /* IAHT and GAHT filter parameterization */
2663 if (emac_has_feature(dev
, EMAC_FTR_EMAC4SYNC
)) {
2664 dev
->xaht_slots_shift
= EMAC4SYNC_XAHT_SLOTS_SHIFT
;
2665 dev
->xaht_width_shift
= EMAC4SYNC_XAHT_WIDTH_SHIFT
;
2667 dev
->xaht_slots_shift
= EMAC4_XAHT_SLOTS_SHIFT
;
2668 dev
->xaht_width_shift
= EMAC4_XAHT_WIDTH_SHIFT
;
2671 DBG(dev
, "features : 0x%08x / 0x%08x\n", dev
->features
, EMAC_FTRS_POSSIBLE
);
2672 DBG(dev
, "tx_fifo_size : %d (%d gige)\n", dev
->tx_fifo_size
, dev
->tx_fifo_size_gige
);
2673 DBG(dev
, "rx_fifo_size : %d (%d gige)\n", dev
->rx_fifo_size
, dev
->rx_fifo_size_gige
);
2674 DBG(dev
, "max_mtu : %d\n", dev
->max_mtu
);
2675 DBG(dev
, "OPB freq : %d\n", dev
->opb_bus_freq
);
2680 static const struct net_device_ops emac_netdev_ops
= {
2681 .ndo_open
= emac_open
,
2682 .ndo_stop
= emac_close
,
2683 .ndo_get_stats
= emac_stats
,
2684 .ndo_set_rx_mode
= emac_set_multicast_list
,
2685 .ndo_do_ioctl
= emac_ioctl
,
2686 .ndo_tx_timeout
= emac_tx_timeout
,
2687 .ndo_validate_addr
= eth_validate_addr
,
2688 .ndo_set_mac_address
= eth_mac_addr
,
2689 .ndo_start_xmit
= emac_start_xmit
,
2690 .ndo_change_mtu
= eth_change_mtu
,
2693 static const struct net_device_ops emac_gige_netdev_ops
= {
2694 .ndo_open
= emac_open
,
2695 .ndo_stop
= emac_close
,
2696 .ndo_get_stats
= emac_stats
,
2697 .ndo_set_rx_mode
= emac_set_multicast_list
,
2698 .ndo_do_ioctl
= emac_ioctl
,
2699 .ndo_tx_timeout
= emac_tx_timeout
,
2700 .ndo_validate_addr
= eth_validate_addr
,
2701 .ndo_set_mac_address
= eth_mac_addr
,
2702 .ndo_start_xmit
= emac_start_xmit_sg
,
2703 .ndo_change_mtu
= emac_change_mtu
,
2706 static int __devinit
emac_probe(struct platform_device
*ofdev
)
2708 struct net_device
*ndev
;
2709 struct emac_instance
*dev
;
2710 struct device_node
*np
= ofdev
->dev
.of_node
;
2711 struct device_node
**blist
= NULL
;
2714 /* Skip unused/unwired EMACS. We leave the check for an unused
2715 * property here for now, but new flat device trees should set a
2716 * status property to "disabled" instead.
2718 if (of_get_property(np
, "unused", NULL
) || !of_device_is_available(np
))
2721 /* Find ourselves in the bootlist if we are there */
2722 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
2723 if (emac_boot_list
[i
] == np
)
2724 blist
= &emac_boot_list
[i
];
2726 /* Allocate our net_device structure */
2728 ndev
= alloc_etherdev(sizeof(struct emac_instance
));
2732 dev
= netdev_priv(ndev
);
2736 SET_NETDEV_DEV(ndev
, &ofdev
->dev
);
2738 /* Initialize some embedded data structures */
2739 mutex_init(&dev
->mdio_lock
);
2740 mutex_init(&dev
->link_lock
);
2741 spin_lock_init(&dev
->lock
);
2742 INIT_WORK(&dev
->reset_work
, emac_reset_work
);
2744 /* Init various config data based on device-tree */
2745 err
= emac_init_config(dev
);
2749 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2750 dev
->emac_irq
= irq_of_parse_and_map(np
, 0);
2751 dev
->wol_irq
= irq_of_parse_and_map(np
, 1);
2752 if (dev
->emac_irq
== NO_IRQ
) {
2753 printk(KERN_ERR
"%s: Can't map main interrupt\n", np
->full_name
);
2756 ndev
->irq
= dev
->emac_irq
;
2759 if (of_address_to_resource(np
, 0, &dev
->rsrc_regs
)) {
2760 printk(KERN_ERR
"%s: Can't get registers address\n",
2764 // TODO : request_mem_region
2765 dev
->emacp
= ioremap(dev
->rsrc_regs
.start
,
2766 resource_size(&dev
->rsrc_regs
));
2767 if (dev
->emacp
== NULL
) {
2768 printk(KERN_ERR
"%s: Can't map device registers!\n",
2774 /* Wait for dependent devices */
2775 err
= emac_wait_deps(dev
);
2778 "%s: Timeout waiting for dependent devices\n",
2780 /* display more info about what's missing ? */
2783 dev
->mal
= dev_get_drvdata(&dev
->mal_dev
->dev
);
2784 if (dev
->mdio_dev
!= NULL
)
2785 dev
->mdio_instance
= dev_get_drvdata(&dev
->mdio_dev
->dev
);
2787 /* Register with MAL */
2788 dev
->commac
.ops
= &emac_commac_ops
;
2789 dev
->commac
.dev
= dev
;
2790 dev
->commac
.tx_chan_mask
= MAL_CHAN_MASK(dev
->mal_tx_chan
);
2791 dev
->commac
.rx_chan_mask
= MAL_CHAN_MASK(dev
->mal_rx_chan
);
2792 err
= mal_register_commac(dev
->mal
, &dev
->commac
);
2794 printk(KERN_ERR
"%s: failed to register with mal %s!\n",
2795 np
->full_name
, dev
->mal_dev
->dev
.of_node
->full_name
);
2798 dev
->rx_skb_size
= emac_rx_skb_size(ndev
->mtu
);
2799 dev
->rx_sync_size
= emac_rx_sync_size(ndev
->mtu
);
2801 /* Get pointers to BD rings */
2803 dev
->mal
->bd_virt
+ mal_tx_bd_offset(dev
->mal
, dev
->mal_tx_chan
);
2805 dev
->mal
->bd_virt
+ mal_rx_bd_offset(dev
->mal
, dev
->mal_rx_chan
);
2807 DBG(dev
, "tx_desc %p" NL
, dev
->tx_desc
);
2808 DBG(dev
, "rx_desc %p" NL
, dev
->rx_desc
);
2811 memset(dev
->tx_desc
, 0, NUM_TX_BUFF
* sizeof(struct mal_descriptor
));
2812 memset(dev
->rx_desc
, 0, NUM_RX_BUFF
* sizeof(struct mal_descriptor
));
2813 memset(dev
->tx_skb
, 0, NUM_TX_BUFF
* sizeof(struct sk_buff
*));
2814 memset(dev
->rx_skb
, 0, NUM_RX_BUFF
* sizeof(struct sk_buff
*));
2816 /* Attach to ZMII, if needed */
2817 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
) &&
2818 (err
= zmii_attach(dev
->zmii_dev
, dev
->zmii_port
, &dev
->phy_mode
)) != 0)
2819 goto err_unreg_commac
;
2821 /* Attach to RGMII, if needed */
2822 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
) &&
2823 (err
= rgmii_attach(dev
->rgmii_dev
, dev
->rgmii_port
, dev
->phy_mode
)) != 0)
2824 goto err_detach_zmii
;
2826 /* Attach to TAH, if needed */
2827 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
) &&
2828 (err
= tah_attach(dev
->tah_dev
, dev
->tah_port
)) != 0)
2829 goto err_detach_rgmii
;
2831 /* Set some link defaults before we can find out real parameters */
2832 dev
->phy
.speed
= SPEED_100
;
2833 dev
->phy
.duplex
= DUPLEX_FULL
;
2834 dev
->phy
.autoneg
= AUTONEG_DISABLE
;
2835 dev
->phy
.pause
= dev
->phy
.asym_pause
= 0;
2836 dev
->stop_timeout
= STOP_TIMEOUT_100
;
2837 INIT_DELAYED_WORK(&dev
->link_work
, emac_link_timer
);
2839 /* Some SoCs like APM821xx does not support Half Duplex mode. */
2840 if (emac_has_feature(dev
, EMAC_FTR_APM821XX_NO_HALF_DUPLEX
)) {
2841 dev
->phy_feat_exc
= (SUPPORTED_1000baseT_Half
|
2842 SUPPORTED_100baseT_Half
|
2843 SUPPORTED_10baseT_Half
);
2846 /* Find PHY if any */
2847 err
= emac_init_phy(dev
);
2849 goto err_detach_tah
;
2852 ndev
->hw_features
= NETIF_F_IP_CSUM
| NETIF_F_SG
;
2853 ndev
->features
|= ndev
->hw_features
| NETIF_F_RXCSUM
;
2855 ndev
->watchdog_timeo
= 5 * HZ
;
2856 if (emac_phy_supports_gige(dev
->phy_mode
)) {
2857 ndev
->netdev_ops
= &emac_gige_netdev_ops
;
2858 dev
->commac
.ops
= &emac_commac_sg_ops
;
2860 ndev
->netdev_ops
= &emac_netdev_ops
;
2861 SET_ETHTOOL_OPS(ndev
, &emac_ethtool_ops
);
2863 netif_carrier_off(ndev
);
2865 err
= register_netdev(ndev
);
2867 printk(KERN_ERR
"%s: failed to register net device (%d)!\n",
2868 np
->full_name
, err
);
2869 goto err_detach_tah
;
2872 /* Set our drvdata last as we don't want them visible until we are
2876 dev_set_drvdata(&ofdev
->dev
, dev
);
2878 /* There's a new kid in town ! Let's tell everybody */
2879 wake_up_all(&emac_probe_wait
);
2882 printk(KERN_INFO
"%s: EMAC-%d %s, MAC %pM\n",
2883 ndev
->name
, dev
->cell_index
, np
->full_name
, ndev
->dev_addr
);
2885 if (dev
->phy_mode
== PHY_MODE_SGMII
)
2886 printk(KERN_NOTICE
"%s: in SGMII mode\n", ndev
->name
);
2888 if (dev
->phy
.address
>= 0)
2889 printk("%s: found %s PHY (0x%02x)\n", ndev
->name
,
2890 dev
->phy
.def
->name
, dev
->phy
.address
);
2892 emac_dbg_register(dev
);
2897 /* I have a bad feeling about this ... */
2900 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2901 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2903 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2904 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2906 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2907 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2909 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2913 iounmap(dev
->emacp
);
2915 if (dev
->wol_irq
!= NO_IRQ
)
2916 irq_dispose_mapping(dev
->wol_irq
);
2917 if (dev
->emac_irq
!= NO_IRQ
)
2918 irq_dispose_mapping(dev
->emac_irq
);
2922 /* if we were on the bootlist, remove us as we won't show up and
2923 * wake up all waiters to notify them in case they were waiting
2928 wake_up_all(&emac_probe_wait
);
2933 static int __devexit
emac_remove(struct platform_device
*ofdev
)
2935 struct emac_instance
*dev
= dev_get_drvdata(&ofdev
->dev
);
2937 DBG(dev
, "remove" NL
);
2939 dev_set_drvdata(&ofdev
->dev
, NULL
);
2941 unregister_netdev(dev
->ndev
);
2943 cancel_work_sync(&dev
->reset_work
);
2945 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2946 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2947 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2948 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2949 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2950 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2952 busy_phy_map
&= ~(1 << dev
->phy
.address
);
2953 DBG(dev
, "busy_phy_map now %#x" NL
, busy_phy_map
);
2955 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2958 emac_dbg_unregister(dev
);
2959 iounmap(dev
->emacp
);
2961 if (dev
->wol_irq
!= NO_IRQ
)
2962 irq_dispose_mapping(dev
->wol_irq
);
2963 if (dev
->emac_irq
!= NO_IRQ
)
2964 irq_dispose_mapping(dev
->emac_irq
);
2966 free_netdev(dev
->ndev
);
2971 /* XXX Features in here should be replaced by properties... */
2972 static struct of_device_id emac_match
[] =
2976 .compatible
= "ibm,emac",
2980 .compatible
= "ibm,emac4",
2984 .compatible
= "ibm,emac4sync",
2988 MODULE_DEVICE_TABLE(of
, emac_match
);
2990 static struct platform_driver emac_driver
= {
2993 .owner
= THIS_MODULE
,
2994 .of_match_table
= emac_match
,
2996 .probe
= emac_probe
,
2997 .remove
= emac_remove
,
3000 static void __init
emac_make_bootlist(void)
3002 struct device_node
*np
= NULL
;
3003 int j
, max
, i
= 0, k
;
3004 int cell_indices
[EMAC_BOOT_LIST_SIZE
];
3007 while((np
= of_find_all_nodes(np
)) != NULL
) {
3010 if (of_match_node(emac_match
, np
) == NULL
)
3012 if (of_get_property(np
, "unused", NULL
))
3014 idx
= of_get_property(np
, "cell-index", NULL
);
3017 cell_indices
[i
] = *idx
;
3018 emac_boot_list
[i
++] = of_node_get(np
);
3019 if (i
>= EMAC_BOOT_LIST_SIZE
) {
3026 /* Bubble sort them (doh, what a creative algorithm :-) */
3027 for (i
= 0; max
> 1 && (i
< (max
- 1)); i
++)
3028 for (j
= i
; j
< max
; j
++) {
3029 if (cell_indices
[i
] > cell_indices
[j
]) {
3030 np
= emac_boot_list
[i
];
3031 emac_boot_list
[i
] = emac_boot_list
[j
];
3032 emac_boot_list
[j
] = np
;
3033 k
= cell_indices
[i
];
3034 cell_indices
[i
] = cell_indices
[j
];
3035 cell_indices
[j
] = k
;
3040 static int __init
emac_init(void)
3044 printk(KERN_INFO DRV_DESC
", version " DRV_VERSION
"\n");
3046 /* Init debug stuff */
3049 /* Build EMAC boot list */
3050 emac_make_bootlist();
3052 /* Init submodules */
3065 rc
= platform_driver_register(&emac_driver
);
3083 static void __exit
emac_exit(void)
3087 platform_driver_unregister(&emac_driver
);
3095 /* Destroy EMAC boot list */
3096 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
3097 if (emac_boot_list
[i
])
3098 of_node_put(emac_boot_list
[i
]);
module_init(emac_init);
module_exit(emac_exit);