/*
 * drivers/net/ethernet/ibm/emac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *	Armin Kuster <akuster@mvista.com>
 *	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>

#include "core.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
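/*
 * For illustration only (added, not part of the original): API-correct
 * teardown of a mapped TX buffer would look roughly like this, assuming
 * the driver kept the DMA handle and length per BD ("tx_dma" and
 * "tx_dma_len" are hypothetical fields):
 *
 *	dma_unmap_single(&dev->ofdev->dev, dev->tx_dma[slot],
 *			 dev->tx_dma_len[slot], DMA_TO_DEVICE);
 *
 * It is exactly this per-BD bookkeeping that the note above chooses to
 * avoid.
 */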
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
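/*
 * Added note (not in the original source): with a ring of NUM_TX_BUFF = 64
 * descriptors (an assumption; the actual value comes from the
 * CONFIG_IBM_EMAC_TXB Kconfig option), the queue is only woken once fewer
 * than 64 / 4 = 16 descriptors remain in flight, batching wakeups instead
 * of toggling the queue on every completed packet.
 */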
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
static u32 busy_phy_map;
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
			error);
}
/* EMAC PHY clock workaround:
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
 * which allows controlling each EMAC clock
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}

static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
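/*
 * Added sanity check of the values above: a maximum-length frame of
 * 1518 bytes plus 8 bytes of preamble and a 12-byte inter-frame gap is
 * 1538 bytes = 12304 bits, i.e. ~1230 us at 10 Mbit/s, ~123 us at
 * 100 Mbit/s and ~12.3 us at 1 Gbit/s, matching 1230/124/13. The jumbo
 * value allows for a ~9000-byte frame at gigabit speed (~72 us).
 */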
static unsigned char default_mcast_addr[] = {
	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_SGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/*
	 * PPC460EX/GT Embedded Processor Advanced User's Manual
	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
	 * of the EMAC. If none is present, select the internal clock
	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
	 * After a soft reset, select the external clock.
	 */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: select internal loop clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
		} else {
			/* PHY present: select external clock before reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
		if (dev->phy_address == 0xffffffff &&
		    dev->phy_map == 0xffffffff) {
			/* No PHY: restore external clock source after reset */
			dcri_clrset(SDR0, SDR0_ETH_CFG,
				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
		}
	}
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_RMR_BASE;
	else
		r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		r &= ~EMAC4_RMR_MJS_MASK;
		r |= EMAC4_RMR_MJS(ndev->mtu);
	}

	return r;
}
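/*
 * Added summary of the mapping above: IFF_PROMISC enables "receive
 * everything" (EMAC_RMR_PME); IFF_ALLMULTI, or more multicast groups than
 * the hash table has slots, enables all-multicast (EMAC_RMR_PMME); a
 * non-empty multicast list otherwise enables hash matching (EMAC_RMR_MAE),
 * in which case the caller is expected to program the hash via
 * emac_hash_mc().
 */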
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}
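/*
 * Added worked example: the TX request threshold is encoded in 64-byte
 * FIFO blocks, minus one. emac_configure() passes tx_size / 2, so with a
 * 2048-byte TX FIFO the value is (1024 >> 6) - 1 = 15 before shifting.
 */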
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}
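/*
 * Added note on the encoding above: both layouts place the low-water mark
 * in the upper bit-field and the high-water mark in the lower one; the
 * EMAC4 variant simply grants each field one more bit (10 vs. 9 bits for
 * the high-water mark), hence the different shifts and masks.
 */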
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default: /* make gcc happy */
		break;
	}

	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Set interrupt mask */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;

	res = __emac_mdio_read((dev->mdio_instance &&
				dev->phy.gpcs_address != id) ?
				dev->mdio_instance : dev,
			       (u8) id, (u8) reg);
	return res;
}
static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct emac_instance *dev = netdev_priv(ndev);

	__emac_mdio_write((dev->mdio_instance &&
			   dev->phy.gpcs_address != id) ?
			   dev->mdio_instance : dev,
			  (u8) id, (u8) reg, (u16) val);
}
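/*
 * Illustrative use of the MDIO accessors above (not part of the driver):
 * the standard MII ID registers from <linux/mii.h> can be read back to
 * form the 32-bit PHY identifier, e.g.:
 *
 *	u32 phy_id = (emac_mdio_read(ndev, addr, MII_PHYSID1) << 16) |
 *		      emac_mdio_read(ndev, addr, MII_PHYSID2);
 *
 * (error returns ignored for brevity)
 */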
/* Tx lock BH */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.  --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
/* Tx lock BH */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "multicast" NL);

	BUG_ON(!netif_running(dev->ndev));

	if (dev->no_mcast) {
		dev->mcast_pending = 1;
		return;
	}
	__emac_set_multicast_list(dev);
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
		return -EINVAL;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}

	return ret;
}
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
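/*
 * Added commentary on the +/- 2 byte dance above: the data pointer is
 * advanced by EMAC_RX_SKB_HEADROOM + 2 bytes, so the 14-byte Ethernet
 * header leaves the IP header 4-byte aligned. The DMA mapping is taken
 * from skb->data - 2 and then advanced by 2 again, keeping the mapped
 * region word-aligned for the controller while the descriptor still
 * points at the real frame start.
 */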
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
static inline u16 emac_tx_csum(struct emac_instance *dev,
			       struct sk_buff *skb)
{
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
		(skb->ip_summed == CHECKSUM_PARTIAL)) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
	return 0;
}
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
/* Tx lock BH */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
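/*
 * Added worked example (assuming MAL_MAX_TX_SIZE is 4096): a 10000-byte
 * segment is split into chunks of 4096, 4096 and 1808 bytes, consuming
 * three descriptors; MAL_TX_CTRL_LAST is only set on the final chunk of
 * the last segment.
 */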
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);

		dev->rx_slot = 0;
	}
	return received;
}
/* NAPI poll context */
static int emac_peek_rx(void *param)
{
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}
/* NAPI poll context */
static int emac_peek_rx_sg(void *param)
{
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}
/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF &&
			    !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);
		mutex_unlock(&dev->link_lock);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	}
	emac_force_link_update(dev);

	return 0;
}
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
static int emac_get_regs_len(struct emac_instance *dev)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE(dev);
	else
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE(dev);
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
	}
}
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
	return res;
}
static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return EMAC_ETHTOOL_STATS_COUNT;
	else
		return -EINVAL;
}
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
};
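
/* emac_ioctl() services the legacy MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG) by going straight to the attached PHY over MDIO; it bails
 * out with -EOPNOTSUPP when this EMAC runs PHY-less.
 */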

static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

struct emac_depentry {
	u32			phandle;
	struct device_node	*node;
	struct platform_device	*ofdev;
	void			*drvdata;
};

#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
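
/* Each EMAC may depend on a MAL, ZMII, RGMII, TAH and MDIO device, plus on
 * the previous EMAC in the boot list.  A dependency counts as satisfied once
 * its device node, platform device and driver data have all been located;
 * emac_check_deps() makes a pass over the table and fills in whatever has
 * become available since it last ran.
 */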

static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}

static void emac_put_deps(struct emac_instance *dev)
{
	if (dev->mal_dev)
		of_dev_put(dev->mal_dev);
	if (dev->zmii_dev)
		of_dev_put(dev->zmii_dev);
	if (dev->rgmii_dev)
		of_dev_put(dev->rgmii_dev);
	if (dev->mdio_dev)
		of_dev_put(dev->mdio_dev);
	if (dev->tah_dev)
		of_dev_put(dev->tah_dev);
}

static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* We are only interested in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
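
/* Dependencies may probe after we do, so emac_wait_deps() registers the bus
 * notifier above: every driver that binds on the platform bus kicks the
 * emac_probe_wait queue, re-running emac_check_deps() until everything is
 * found or EMAC_PROBE_DEP_TIMEOUT expires.
 */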

static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph != 0)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph != 0)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}

static int emac_read_uint_prop(struct device_node *np, const char *name,
			       u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = prop[0];
	return 0;
}
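
/* PHY discovery below works off a bitmap of MDIO addresses: "phy-map" and
 * "phy-address" from the device tree are combined with the global
 * busy_phy_map, and every still-free address is given a quick MII_BMCR read
 * before emac_mii_phy_probe() is allowed to claim it.
 */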

static int emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;
	dev->phy.features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}

static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%s: Flow control not disabled!\n",
			       np->full_name);
			return -ENXIO;
#endif
		}
	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}
	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
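
/* Two flavours of net_device_ops: EMACs whose PHY mode supports gigabit get
 * the scatter/gather transmit path and a real MTU-change handler, everything
 * else uses the plain single-buffer xmit and generic eth_change_mtu().
 */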

static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};

static int emac_probe(struct platform_device *ofdev)
{
	struct net_device *ndev;
	struct emac_instance *dev;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node **blist = NULL;
	int err, i;

	/* Skip unused/unwired EMACS.  We leave the check for an unused
	 * property here for now, but new flat device trees should set a
	 * status property to "disabled" instead.
	 */
	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
		return -ENODEV;

	/* Find ourselves in the bootlist if we are there */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i] == np)
			blist = &emac_boot_list[i];

	/* Allocate our net_device structure */
	err = -ENOMEM;
	ndev = alloc_etherdev(sizeof(struct emac_instance));
	if (!ndev)
		goto err_gone;

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->ofdev = ofdev;
	dev->blist = blist;
	SET_NETDEV_DEV(ndev, &ofdev->dev);

	/* Initialize some embedded data structures */
	mutex_init(&dev->mdio_lock);
	mutex_init(&dev->link_lock);
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->reset_work, emac_reset_work);

	/* Init various config data based on device-tree */
	err = emac_init_config(dev);
	if (err != 0)
		goto err_free;

	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
	dev->emac_irq = irq_of_parse_and_map(np, 0);
	dev->wol_irq = irq_of_parse_and_map(np, 1);
	if (dev->emac_irq == NO_IRQ) {
		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
		goto err_free;
	}
	ndev->irq = dev->emac_irq;

	/* Map EMAC regs */
	if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_irq_unmap;
	}
	// TODO : request_mem_region
	dev->emacp = ioremap(dev->rsrc_regs.start,
			     resource_size(&dev->rsrc_regs));
	if (dev->emacp == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		err = -ENOMEM;
		goto err_irq_unmap;
	}

	/* Wait for dependent devices */
	err = emac_wait_deps(dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Timeout waiting for dependent devices\n",
		       np->full_name);
		/*  display more info about what's missing ? */
		goto err_reg_unmap;
	}
	dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
	if (dev->mdio_dev != NULL)
		dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "%s: failed to register with mal %s!\n",
		       np->full_name, dev->mal_dev->dev.of_node->full_name);
		goto err_rel_deps;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);

	DBG(dev, "tx_desc %p" NL, dev->tx_desc);
	DBG(dev, "rx_desc %p" NL, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
	memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));

	/* Attach to ZMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
	    (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
		goto err_unreg_commac;

	/* Attach to RGMII, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
	    (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
		goto err_detach_zmii;

	/* Attach to TAH, if needed */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
	    (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
		goto err_detach_rgmii;

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);

	/* Some SoCs like APM821xx do not support half-duplex mode. */
	if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
		dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_100baseT_Half |
				     SUPPORTED_10baseT_Half);
	}

	/* Find PHY if any */
	err = emac_init_phy(dev);
	if (err != 0)
		goto err_detach_tah;

	if (dev->tah_dev) {
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
		ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
	}
	ndev->watchdog_timeo = 5 * HZ;
	if (emac_phy_supports_gige(dev->phy_mode)) {
		ndev->netdev_ops = &emac_gige_netdev_ops;
		dev->commac.ops = &emac_commac_sg_ops;
	} else
		ndev->netdev_ops = &emac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "%s: failed to register net device (%d)!\n",
		       np->full_name, err);
		goto err_detach_tah;
	}

	/* Set our drvdata last as we don't want them visible until we are
	 * fully initialized
	 */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	/* There's a new kid in town ! Let's tell everybody */
	wake_up_all(&emac_probe_wait);

	printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
	       ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);

	if (dev->phy_mode == PHY_MODE_SGMII)
		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev);

	/* Life is good */
	return 0;

	/* I have a bad feeling about this ... */

 err_detach_tah:
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
 err_detach_rgmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
 err_detach_zmii:
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);
 err_unreg_commac:
	mal_unregister_commac(dev->mal, &dev->commac);
 err_rel_deps:
	emac_put_deps(dev);
 err_reg_unmap:
	iounmap(dev->emacp);
 err_irq_unmap:
	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);
 err_free:
	free_netdev(ndev);
 err_gone:
	/* if we were on the bootlist, remove us as we won't show up and
	 * wake up all waiters to notify them in case they were waiting
	 * on us
	 */
	if (blist) {
		*blist = NULL;
		wake_up_all(&emac_probe_wait);
	}
	return err;
}

static int emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	cancel_work_sync(&dev->reset_work);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	busy_phy_map &= ~(1 << dev->phy.address);
	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}

/* XXX Features in here should be replaced by properties... */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);

static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.owner = THIS_MODULE,
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
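
/* The boot list is sorted by cell-index so that EMACs probe in a stable
 * hardware order; emac_wait_deps() relies on this when it makes an EMAC
 * wait for the previous entry in the list.
 */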

static void __init emac_make_bootlist(void)
{
	struct device_node *np = NULL;
	int j, max, i = 0, k;
	int cell_indices[EMAC_BOOT_LIST_SIZE];

	/* Collect EMACs */
	while((np = of_find_all_nodes(np)) != NULL) {
		const u32 *idx;

		if (of_match_node(emac_match, np) == NULL)
			continue;
		if (of_get_property(np, "unused", NULL))
			continue;
		idx = of_get_property(np, "cell-index", NULL);
		if (idx == NULL)
			continue;
		cell_indices[i] = *idx;
		emac_boot_list[i++] = of_node_get(np);
		if (i >= EMAC_BOOT_LIST_SIZE) {
			of_node_put(np);
			break;
		}
	}
	max = i;

	/* Bubble sort them (doh, what a creative algorithm :-) */
	for (i = 0; max > 1 && (i < (max - 1)); i++)
		for (j = i; j < max; j++) {
			if (cell_indices[i] > cell_indices[j]) {
				np = emac_boot_list[i];
				emac_boot_list[i] = emac_boot_list[j];
				emac_boot_list[j] = np;
				k = cell_indices[i];
				cell_indices[i] = cell_indices[j];
				cell_indices[j] = k;
			}
		}
}

static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = platform_driver_register(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}

static void __exit emac_exit(void)
{
	int i;

	platform_driver_unregister(&emac_driver);

	tah_exit();
	rgmii_exit();
	zmii_exit();
	mal_exit();
	emac_fini_debug();

	/* Destroy EMAC boot list */
	for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
		if (emac_boot_list[i])
			of_node_put(emac_boot_list[i]);
}

module_init(emac_init);
module_exit(emac_exit);