/*
 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>

#include <asm/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/delay.h>
#include "mv643xx_eth.h"
/* Static function declarations */
static void eth_port_uc_addr_get(struct net_device *dev,
						unsigned char *MacAddr);
static void eth_port_set_multicast_list(struct net_device *);
static void mv643xx_eth_port_enable_tx(unsigned int port_num,
						unsigned int queues);
static void mv643xx_eth_port_enable_rx(unsigned int port_num,
						unsigned int queues);
static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
static int mv643xx_eth_open(struct net_device *);
static int mv643xx_eth_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
static int mv643xx_poll(struct net_device *dev, int *budget);
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
static int ethernet_phy_detect(unsigned int eth_port_num);
static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
static void mv643xx_mdio_write(struct net_device *dev, int phy_id,
						int location, int val);
static int mv643xx_eth_do_ioctl(struct net_device *dev,
						struct ifreq *ifr, int cmd);
static const struct ethtool_ops mv643xx_ethtool_ops;
static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";

static void __iomem *mv643xx_eth_shared_base;

/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
static inline u32 mv_read(int offset)
{
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;

	return readl(reg_base + offset);
}

static inline void mv_write(int offset, u32 data)
{
	void __iomem *reg_base;

	reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
	writel(data, reg_base + offset);
}
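/*
 * Note on the subtraction above (an inference, not documented here):
 * the MV643XX_ETH_* register offsets passed to mv_read()/mv_write()
 * appear to be offsets within the chip's full register window, which
 * already include MV643XX_ETH_SHARED_REGS. Rebasing the ioremap'd
 * pointer by -MV643XX_ETH_SHARED_REGS keeps (reg_base + offset)
 * pointing at the right register without counting that constant twice.
 * For example, with a hypothetical offset of 0x2000 + 0x400 and
 * MV643XX_ETH_SHARED_REGS == 0x2000, the access lands 0x400 bytes into
 * the mapped region.
 */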
/*
 * Changes MTU (maximum transfer unit) of the gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 *		new mtu size
 * Output :	0 upon success, -EINVAL upon failure
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;

	dev->mtu = new_mtu;

	/*
	 * Stop then re-open the interface. This will allocate RX skb's with
	 * the new MTU.
	 * There is a possible danger that the open will not succeed, due
	 * to memory being full, which might fail the open function.
	 */
	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev))
			printk(KERN_ERR
				"%s: Fatal error on opening device\n",
				dev->name);
	}

	return 0;
}
/*
 * mv643xx_eth_rx_refill_descs
 *
 * Fills / refills RX queue on a certain gigabit ethernet port
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;
	struct sk_buff *skb;
	int unaligned;

	while (mp->rx_desc_count < mp->rx_ring_size) {
		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
		if (!skb)
			break;
		mp->rx_desc_count++;
		unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
		if (unaligned)
			skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
		pkt_info.return_info = skb;
		if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
			printk(KERN_ERR
				"%s: Error allocating RX Ring\n", dev->name);
			break;
		}
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}
	/*
	 * If the RX ring has run empty of SKBs, set a timer to try
	 * allocating again at a later time.
	 */
	if (mp->rx_desc_count == 0) {
		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
		mp->timeout.expires = jiffies + (HZ / 10);	/* 100 mSec */
		add_timer(&mp->timeout);
	}
}
/*
 * mv643xx_eth_rx_refill_descs_timer_wrapper
 *
 * Timer routine to wake up the RX queue filling task. This function is
 * used only in case the RX queue is empty, and all alloc_skb calls have
 * failed (due to an out-of-memory event).
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
{
	mv643xx_eth_rx_refill_descs((struct net_device *)data);
}
/*
 * mv643xx_eth_update_mac_address
 *
 * Update the MAC address of the port in the address table
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	eth_port_init_mac_tables(port_num);
	eth_port_uc_addr_set(port_num, dev->dev_addr);
}
/*
 * mv643xx_eth_set_rx_mode
 *
 * Change from promiscuous to regular rx mode
 *
 * Input :	pointer to ethernet interface network device structure
 * Output :	N/A
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 config_reg;

	config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	else
		config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg);

	eth_port_set_multicast_list(dev);
}
/*
 * mv643xx_eth_set_mac_address
 *
 * Change the interface's MAC address.
 * No special hardware thing should be done because the interface is
 * always put in promiscuous mode.
 *
 * Input :	pointer to ethernet interface network device structure and
 *		a pointer to the designated entry to be added to the cache.
 * Output :	zero upon success, negative upon failure
 */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
	return 0;
}
/*
 * mv643xx_eth_tx_timeout
 *
 * Called upon a timeout on transmitting a packet
 *
 * Input :	pointer to ethernet interface network device structure.
 * Output :	N/A
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout\n", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
}
/*
 * mv643xx_eth_tx_timeout_task
 *
 * Actual routine to reset the adapter when a timeout on Tx has occurred
 */
static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
						  tx_timeout_task);
	struct net_device *dev = mp->mii.dev; /* yuck */

	if (!netif_running(dev))
		return;

	netif_stop_queue(dev);

	eth_port_reset(mp->port_num);
	eth_port_start(dev);

	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}
/*
 * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
 *
 * If force is non-zero, frees uncompleted descriptors as well
 */
int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct eth_tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	unsigned long flags;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	while (mp->tx_desc_count > 0) {
		spin_lock_irqsave(&mp->lock, flags);
		tx_index = mp->tx_used_desc_q;
		desc = &mp->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
		mp->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = mp->tx_skb[tx_index];
		if (skb)
			mp->tx_skb[tx_index] = NULL;

		spin_unlock_irqrestore(&mp->lock, flags);

		if (cmd_sts & ETH_ERROR_SUMMARY) {
			printk("%s: Error in TX\n", dev->name);
			mp->stats.tx_errors++;
		}

		if (cmd_sts & ETH_TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		released = 1;
	}

	return released;
}
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	if (mv643xx_eth_free_tx_descs(dev, 0) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
{
	mv643xx_eth_free_tx_descs(dev, 1);
}
/*
 * mv643xx_eth_receive
 *
 * This function forwards packets that are received from the port's
 * queues toward the kernel core, or FastRoutes them to another interface.
 *
 * Input :	dev - a pointer to the required interface
 *		max - maximum number to receive (0 means unlimited)
 *
 * Output :	number of served packets
 */
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;
	struct pkt_info pkt_info;

	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
		dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
							DMA_FROM_DEVICE);
		mp->rx_desc_count--;
		received_packets++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;
		skb = pkt_info.return_info;
		/*
		 * In case we received a packet without the first / last bits
		 * on, OR the error summary bit is on, the packet needs to be
		 * dropped.
		 */
		if (((pkt_info.cmd_sts
				& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
				|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
							ETH_RX_LAST_DESC)) !=
				(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
				printk(KERN_ERR
					"%s: Received packet spread "
					"on multiple descriptors\n",
					dev->name);
			}
			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, pkt_info.byte_cnt - 4);

			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
		dev->last_rx = jiffies;
	}
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	return received_packets;
}
/* Set the mv643xx port configuration register for the speed/duplex mode. */
static void mv643xx_eth_update_pscr(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	u32 o_pscr, n_pscr;
	unsigned int queues;

	o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	n_pscr = o_pscr;

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
		    MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
		    MV643XX_ETH_SET_FULL_DUPLEX_MODE |
		    MV643XX_ETH_MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;

	if (ecmd->speed == SPEED_1000)
		n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
			  MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
	else {
		if (ecmd->speed == SPEED_100)
			n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
		n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;
	}

	if (n_pscr != o_pscr) {
		if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
								n_pscr);
		else {
			queues = mv643xx_eth_port_disable_tx(port_num);

			o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
								o_pscr);
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
								n_pscr);
			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
								n_pscr);
			if (queues)
				mv643xx_eth_port_enable_tx(port_num, queues);
		}
	}
}
/*
 * mv643xx_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input :	irq	- irq number (not used)
 *		dev_id	- a pointer to the required interface's data structure
 * Output :	N/A
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 eth_int_cause, eth_int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
							ETH_INT_UNMASK_ALL;
	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
		eth_int_cause_ext = mv_read(
			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
						ETH_INT_UNMASK_ALL_EXT;
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
							~eth_int_cause_ext);
	}

	/* PHY status changed */
	if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			mv643xx_eth_port_enable_tx(port_num,
						   ETH_TX_QUEUES_ENABLED);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				if (mp->tx_ring_size - mp->tx_desc_count >=
							MAX_DESCS_PER_SKB)
					netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_NAPI
	if (eth_int_cause & ETH_INT_CAUSE_RX) {
		/* schedule the NAPI poll routine to maintain port */
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
							ETH_INT_MASK_ALL);
		/* wait for previous write to complete */
		mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

		netif_rx_schedule(dev);
	}
#else
	if (eth_int_cause & ETH_INT_CAUSE_RX)
		mv643xx_eth_receive_queue(dev, INT_MAX);
#endif
	if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
		mv643xx_eth_free_completed_tx_descs(dev);

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}
/*
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 * DESCRIPTION:
 *	This routine sets the RX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter that counts in chunks of 64
 *	t_clk cycles; when the timeout expires, a maskable interrupt is
 *	asserted.
 *	The parameter is calculated using the tClk of the MV-643xx chip
 *	and the required delay of the interrupt in usec.
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in usec
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal = ((t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
		((coal & 0x3fff) << 8) |
		(mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
			& 0xffc000ff));

	return coal;
}
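/*
 * Worked example of the coalescing arithmetic above (illustration only;
 * the 20 usec delay is a made-up input, not a driver default). With
 * t_clk = 133000000 (the 133 MHz value this driver passes at open time)
 * and delay = 20:
 *
 *	coal = ((133000000 / 1000000) * 20) / 64 = (133 * 20) / 64 = 41
 *
 * The hardware then counts 41 * 64 = 2624 t_clk cycles, i.e. about
 * 2624 / 133 ~= 19.7 usec, before raising the coalesced RX interrupt.
 */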
/*
 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 * DESCRIPTION:
 *	This routine sets the TX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter that counts in chunks of 64
 *	t_clk cycles; when the timeout expires, a maskable interrupt is
 *	asserted.
 *	The parameter is calculated using the t_clk frequency of the
 *	MV-643xx chip and the required delay of the interrupt in usec.
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in uSeconds
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in the MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 */
static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal;

	coal = ((t_clk / 1000000) * delay) / 64;

	/* Set TX Coalescing mechanism */
	mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
								coal << 4);

	return coal;
}
/*
 * ether_init_rx_desc_ring - Carve an Rx chain descriptor list and buffers in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Rx chained list of descriptors and packet
 *	buffers in the form of a ring. The routine must be called after the
 *	port initialization routine and before the port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Rx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
{
	volatile struct eth_rx_desc *p_rx_desc;
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
	}

	/* Save Rx desc pointer to driver struct. */
	mp->rx_curr_desc_q = 0;
	mp->rx_used_desc_q = 0;

	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
}
/*
 * ether_init_tx_desc_ring - Carve a Tx chain descriptor list and buffers in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Tx chained list of descriptors and packet
 *	buffers in the form of a ring. The routine must be called after the
 *	port initialization routine and before the port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Tx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
{
	int tx_desc_num = mp->tx_ring_size;
	struct eth_tx_desc *p_tx_desc;
	int i;

	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
	}

	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;

	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
}
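/*
 * A minimal sketch (not driver code, kept compiled-out) of the
 * ring-linking idiom both ring-init routines above share: entry i
 * points at the bus address of entry i + 1, and the modulo makes the
 * last entry wrap back to the first, closing the ring.
 */
#if 0
static void example_link_ring(struct eth_rx_desc *ring, u32 dma_base, int n)
{
	int i;

	for (i = 0; i < n; i++)
		ring[i].next_desc_ptr = dma_base +
				((i + 1) % n) * sizeof(struct eth_rx_desc);

	/* now ring[n - 1].next_desc_ptr == dma_base: the ring is closed */
}
#endif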
static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}

static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}
/*
 * mv643xx_eth_open
 *
 * This function is called when opening the network device. The function
 * should initialize all the hardware, initialize the cyclic Rx/Tx
 * descriptor chains and buffers, and allocate an IRQ to the network
 * device.
 *
 * Input :	a pointer to the network device structure
 *
 * Output :	zero on success, nonzero on failure.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int size;
	int err;

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
								port_num);
		return -EAGAIN;
	}

	eth_port_init(mp);

	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;

	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
								GFP_KERNEL);
	if (!mp->rx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_irq;
	}
	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
								GFP_KERNEL);
	if (!mp->tx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_rx_skb;
	}

	/* Allocate TX ring */
	mp->tx_desc_count = 0;
	size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
	mp->tx_desc_area_size = size;

	if (mp->tx_sram_size) {
		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
							mp->tx_sram_size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else
		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->tx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
							dev->name, size);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
	BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);

	ether_init_tx_desc_ring(mp);

	/* Allocate RX ring */
	mp->rx_desc_count = 0;
	size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
	mp->rx_desc_area_size = size;

	if (mp->rx_sram_size) {
		mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
							mp->rx_sram_size);
		mp->rx_desc_dma = mp->rx_sram_addr;
	} else
		mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
							&mp->rx_desc_dma,
							GFP_KERNEL);

	if (!mp->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
							dev->name, size);
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
							dev->name);
		if (mp->rx_sram_size)
			iounmap(mp->p_tx_desc_area);
		else
			dma_free_coherent(NULL, mp->tx_desc_area_size,
					mp->p_tx_desc_area, mp->tx_desc_dma);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
	memset((void *)mp->p_rx_desc_area, 0, size);

	ether_init_rx_desc_ring(mp);

	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	/* Clear any pending ethernet port interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

	eth_port_start(dev);

	/* Interrupt Coalescing */
	eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
	eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);

	/* Unmask phy and link status changes interrupts */
	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
						ETH_INT_UNMASK_ALL_EXT);

	/* Unmask RX buffer and TX end interrupt */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);

	return 0;

out_free_tx_skb:
	kfree(mp->tx_skb);
out_free_rx_skb:
	kfree(mp->rx_skb);
out_free_irq:
	free_irq(dev->irq, dev);

	return err;
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	/* Stop Tx Queues */
	mv643xx_eth_port_disable_tx(mp->port_num);

	/* Free outstanding skb's on TX ring */
	mv643xx_eth_free_all_tx_descs(dev);

	BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);

	/* Free TX ring */
	if (mp->tx_sram_size)
		iounmap(mp->p_tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->p_tx_desc_area, mp->tx_desc_dma);
}
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int curr;

	/* Stop RX Queues */
	mv643xx_eth_port_disable_rx(port_num);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_desc_count--;
		}
	}

	if (mp->rx_desc_count)
		printk(KERN_ERR
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,
			mp->rx_desc_count);
	/* Free RX ring */
	if (mp->rx_sram_size)
		iounmap(mp->p_rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->p_rx_desc_area, mp->rx_desc_dma);
}
/*
 * mv643xx_eth_stop
 *
 * This function is used when closing the network device.
 * It updates the hardware, releases all memory that holds buffers and
 * descriptors, and releases the IRQ.
 * Input :	a pointer to the device structure
 * Output :	zero if success , nonzero if fails
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
	/* wait for previous write to complete */
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

	netif_poll_disable(dev);

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	eth_port_reset(mp->port_num);

	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);

	netif_poll_enable(dev);

	free_irq(dev->irq, dev);

	return 0;
}
/*
 * mv643xx_poll
 *
 * This function is used in case of NAPI
 */
static int mv643xx_poll(struct net_device *dev, int *budget)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int done = 1, orig_budget, work_done;
	unsigned int port_num = mp->port_num;

#ifdef MV643XX_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		mv643xx_eth_free_completed_tx_descs(dev);
		mp->tx_clean_threshold = 0;
	}
#endif

	if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
						!= (u32) mp->rx_used_desc_q) {
		orig_budget = *budget;
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;
		work_done = mv643xx_eth_receive_queue(dev, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
		if (work_done >= orig_budget)
			done = 0;
	}

	if (done) {
		netif_rx_complete(dev);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
						ETH_INT_UNMASK_ALL);
	}

	return done ? 0 : 1;
}
/**
 * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments
 *
 * Hardware can't handle unaligned fragments smaller than 9 bytes.
 * This helper function detects that case.
 */
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	unsigned int frag;
	skb_frag_t *fragp;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 0x7)
			return 1;
	}
	return 0;
}
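/*
 * Example: a fragment of size 6 at page_offset 3 satisfies both tests
 * above (6 <= 8 and 3 & 0x7 != 0), so the caller would linearize the
 * skb before handing it to the hardware; a 6-byte fragment that is
 * 8-byte aligned, or a 100-byte unaligned fragment, passes untouched.
 */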
/**
 * eth_alloc_tx_desc_index - return the index of the next available tx desc
 */
static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
{
	int tx_desc_curr;

	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);

	tx_desc_curr = mp->tx_curr_desc_q;
	mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;

	BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);

	return tx_desc_curr;
}
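/*
 * Example of the wrap-around above: with tx_ring_size == 16 and
 * tx_curr_desc_q == 15, the function returns 15 and advances
 * tx_curr_desc_q to (15 + 1) % 16 == 0. The first BUG_ON guarantees a
 * free descriptor exists before the allocation; the second catches the
 * curr index running into the used index, i.e. ring bookkeeping gone
 * wrong.
 */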
/**
 * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
 *
 * Ensure the data for each fragment to be transmitted is mapped properly,
 * then fill in descriptors in the tx hw queue.
 */
static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
				   struct sk_buff *skb)
{
	int frag;
	int tx_index;
	struct eth_tx_desc *desc;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];

		tx_index = eth_alloc_tx_desc_index(mp);
		desc = &mp->p_tx_desc_area[tx_index];

		desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA;
		/* Last Frag enables interrupt and frees the skb */
		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
			desc->cmd_sts |= ETH_ZERO_PADDING |
					 ETH_TX_LAST_DESC |
					 ETH_TX_ENABLE_INTERRUPT;
			mp->tx_skb[tx_index] = skb;
		} else
			mp->tx_skb[tx_index] = NULL;

		desc = &mp->p_tx_desc_area[tx_index];
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}
/**
 * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
 *
 * Ensure the data for an skb to be transmitted is mapped properly,
 * then fill in descriptors in the tx hw queue and start the hardware.
 */
static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
					struct sk_buff *skb)
{
	int tx_index;
	struct eth_tx_desc *desc;
	u32 cmd_sts;
	int length;
	int nr_frags = skb_shinfo(skb)->nr_frags;

	cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;

	tx_index = eth_alloc_tx_desc_index(mp);
	desc = &mp->p_tx_desc_area[tx_index];

	if (nr_frags) {
		eth_tx_fill_frag_descs(mp, skb);

		length = skb_headlen(skb);
		mp->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ETH_ZERO_PADDING |
			   ETH_TX_LAST_DESC |
			   ETH_TX_ENABLE_INTERRUPT;
		length = skb->len;
		mp->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != ETH_P_IP);

		cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
			   ETH_GEN_IP_V_4_CHECKSUM |
			   skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;

		switch (skb->nh.iph->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= ETH_UDP_FRAME;
			desc->l4i_chk = skb->h.uh->check;
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = skb->h.th->check;
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);

	mp->tx_desc_count += nr_frags + 1;
}
/**
 * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission
 *
 */
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &mp->stats;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));
	BUG_ON(skb == NULL);

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		return 1;
	}

	if (has_tiny_unaligned_frags(skb)) {
		if (__skb_linearize(skb)) {
			stats->tx_dropped++;
			printk(KERN_DEBUG "%s: failed to linearize tiny "
					"unaligned fragment\n", dev->name);
			return 1;
		}
	}

	spin_lock_irqsave(&mp->lock, flags);

	eth_tx_submit_descs_for_skb(mp, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return 0;		/* success */
}
/*
 * mv643xx_eth_get_stats
 *
 * Returns a pointer to the interface statistics.
 *
 * Input :	dev - a pointer to the required interface
 *
 * Output :	a pointer to the interface's statistics
 */
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return &mp->stats;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_netpoll(struct net_device *netdev)
{
	struct mv643xx_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
	/* wait for previous write to complete */
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev);

	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
}
#endif
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	memset(cmd, 0, sizeof(*cmd));

	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half |
				   ADVERTISED_10baseT_Full |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
}
/*
 * mv643xx_eth_probe
 *
 * First function called after registering the network device.
 * Its purpose is to initialize the device as an ethernet device,
 * fill the ethernet device structure with pointers to functions,
 * and set the MAC address of the interface.
 *
 * Input :	struct device *
 * Output :	-ENOMEM if failed , 0 if success
 */
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	int port_num = pdev->id;
	struct mv643xx_private *mp;
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */

	dev = alloc_etherdev(sizeof(struct mv643xx_private));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);

	mp = netdev_priv(dev);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	mp->port_num = port_num;

	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->get_stats = mv643xx_eth_get_stats;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

	dev->poll = mv643xx_poll;
	dev->weight = 64;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_netpoll;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->tx_queue_len = mp->tx_ring_size;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
	SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);

	spin_lock_init(&mp->lock);

	/* set default config values */
	eth_port_uc_addr_get(dev, dev->dev_addr);
	mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
	mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;

	pd = pdev->dev.platform_data;
	if (pd) {
		if (pd->mac_addr)
			memcpy(dev->dev_addr, pd->mac_addr, 6);

		if (pd->phy_addr || pd->force_phy_addr)
			ethernet_phy_set(port_num, pd->phy_addr);

		if (pd->rx_queue_size)
			mp->rx_ring_size = pd->rx_queue_size;

		if (pd->tx_queue_size)
			mp->tx_ring_size = pd->tx_queue_size;

		if (pd->tx_sram_size) {
			mp->tx_sram_size = pd->tx_sram_size;
			mp->tx_sram_addr = pd->tx_sram_addr;
		}

		if (pd->rx_sram_size) {
			mp->rx_sram_size = pd->rx_sram_size;
			mp->rx_sram_addr = pd->rx_sram_addr;
		}

		duplex = pd->duplex;
		speed = pd->speed;
	}

	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
	mp->mii.mdio_read = mv643xx_mdio_read;
	mp->mii.mdio_write = mv643xx_mdio_write;
	mp->mii.phy_id = ethernet_phy_get(port_num);
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;

	err = ethernet_phy_detect(port_num);
	if (err) {
		pr_debug("MV643xx ethernet port %d: "
					"No PHY detected at addr %d\n",
					port_num, ethernet_phy_get(port_num));
		goto out;
	}

	ethernet_phy_reset(port_num);
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
	mv643xx_set_settings(dev, &cmd);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;

	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
								dev->name);

#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON\n", dev->name);
#endif

	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON\n",
								dev->name);

	printk(KERN_NOTICE "%s: RX NAPI Enabled\n", dev->name);

	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	struct resource *res;

	printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENODEV;

	mv643xx_eth_shared_base = ioremap(res->start,
						MV643XX_ETH_SHARED_REGS_SIZE);
	if (mv643xx_eth_shared_base == NULL)
		return -ENOMEM;

	return 0;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	iounmap(mv643xx_eth_shared_base);
	mv643xx_eth_shared_base = NULL;

	return 0;
}
static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.driver = {
		.name = MV643XX_ETH_NAME,
	},
};

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	.driver = {
		.name = MV643XX_ETH_SHARED_NAME,
	},
};
/*
 * mv643xx_init_module
 *
 * Registers the network drivers into the Linux kernel
 *
 * Input :	N/A
 *
 * Output :	N/A
 */
static int __init mv643xx_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}
	return rc;
}

/*
 * mv643xx_cleanup_module
 *
 * Unregisters the network drivers from the Linux kernel
 *
 * Input :	N/A
 *
 * Output :	N/A
 */
static void __exit mv643xx_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}

module_init(mv643xx_init_module);
module_exit(mv643xx_cleanup_module);
1561 MODULE_LICENSE("GPL");
1562 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
1563 " and Dale Farnsworth");
1564 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
/*
 * The second part is the low level driver of the gigE ethernet ports.
 */

/*
 * Marvell's Gigabit Ethernet controller low level driver
 *
 * DESCRIPTION:
 *	This file introduces the low level API to Marvell's Gigabit Ethernet
 *	controller. This Gigabit Ethernet Controller driver API controls
 *	1) Operations (i.e. port init, start, reset etc').
 *	2) Data flow (i.e. port send, receive etc').
 *	Each Gigabit Ethernet port is controlled via
 *	struct mv643xx_private.
 *	This struct includes user configuration information as well as
 *	driver internal data needed for its operations.
 *
 *	Supported Features:
 *	- This low level driver is OS independent. Allocating memory for
 *	  the descriptor rings and buffers is not within the scope of
 *	  this driver.
 *	- The user is free from Rx/Tx queue managing.
 *	- This low level driver introduces a functionality API that enables
 *	  the user to operate Marvell's Gigabit Ethernet Controller in a
 *	  convenient way.
 *	- Simple Gigabit Ethernet port operation API.
 *	- Simple Gigabit Ethernet port data flow API.
 *	- Data flow and operation API support per queue functionality.
 *	- Support cached descriptors for better performance.
 *	- Enable access to all four DRAM banks and internal SRAM memory
 *	  spaces.
 *	- PHY access and control API.
 *	- Port control register configuration API.
 *	- Full control over Unicast and Multicast MAC configurations.
 *
 *	Operation flow:
 *
 *	Initialization phase
 *	This phase completes the initialization of the
 *	mv643xx_private struct.
 *	User information regarding port configuration has to be set
 *	prior to calling the port initialization routine.
 *
 *	In this phase any port Tx/Rx activity is halted, MIB counters
 *	are cleared, the PHY address is set according to user parameter,
 *	and access to DRAM and internal SRAM memory spaces is enabled.
 *
 *	Driver ring initialization
 *	Allocating memory for the descriptor rings and buffers is not
 *	within the scope of this driver. Thus, the user is required to
 *	allocate memory for the descriptor rings and buffers. Those
 *	memory parameters are used by the Rx and Tx ring initialization
 *	routines in order to carve the descriptor linked list in the form
 *	of a ring.
 *	Note: Pay special attention to alignment issues when using
 *	cached descriptors/buffers. In this phase the driver stores
 *	information in the mv643xx_private struct regarding each queue
 *	ring.
 *
 *	Driver start
 *	This phase prepares the Ethernet port for Rx and Tx activity.
 *	It uses the information stored in the mv643xx_private struct to
 *	initialize the various port registers.
 *
 *	Data flow:
 *	All packet references to/from the driver are done using
 *	struct pkt_info.
 *	This struct is a unified struct used with Rx and Tx operations.
 *	This way the user is not required to be familiar with either
 *	Tx or Rx descriptor structures.
 *	The driver's descriptor rings are managed by indexes.
 *	Those indexes control the ring resources and are used to indicate
 *	a SW resource error:
 *	'current'
 *	This index points to the current available resource for use. For
 *	example in the Rx process this index will point to the descriptor
 *	that will be passed to the user upon calling the receive
 *	routine. In the Tx process, this index will point to the descriptor
 *	that will be assigned with the user packet info and transmitted.
 *	'used'
 *	This index points to the descriptor that needs to restore its
 *	resources. For example in the Rx process, using the Rx buffer return
 *	API will attach the buffer returned in packet info to the
 *	descriptor pointed by 'used'. In the Tx process, using the Tx
 *	descriptor return will merely return the user packet info with
 *	the command status of the transmitted buffer pointed by the
 *	'used' index. Nevertheless, it is essential to use this routine
 *	to update the 'used' index.
 *	'first'
 *	This index supports Tx Scatter-Gather. It points to the first
 *	descriptor of a packet assembled of multiple buffers. For
 *	example, when in the middle of such a packet we have a Tx resource
 *	error, the 'curr' index gets the value of 'first' to indicate
 *	that the ring returned to its state before trying to transmit
 *	this packet.
 *
 *	Receive operation:
 *	The eth_port_receive API sets the packet information struct,
 *	passed by the caller, with received information from the
 *	'current' SDMA descriptor.
 *	It is the user's responsibility to return this resource back
 *	to the Rx descriptor ring to enable the reuse of this resource.
 *	Returning an Rx resource is done using the eth_rx_return_buff API.
 *
 *	Prior to calling the initialization routine eth_port_init() the user
 *	must set the following fields under the mv643xx_private struct:
 *	port_num		User Ethernet port number.
 *	port_config		User port configuration value.
 *	port_config_extend	User port config extend value.
 *	port_sdma_config	User port SDMA config value.
 *	port_serial_control	User port serial control value.
 *
 *	This driver's data flow is done using the struct pkt_info, which
 *	is a unified struct for Rx and Tx operations:
 *
 *	byte_cnt	Tx/Rx descriptor buffer byte count.
 *	l4i_chk		CPU provided TCP Checksum. For Tx operation
 *			only.
 *	cmd_sts		Tx/Rx descriptor command status.
 *	buf_ptr		Tx/Rx descriptor buffer pointer.
 *	return_info	Tx/Rx user resource return information.
 */
/* PHY routines */
static int ethernet_phy_get(unsigned int eth_port_num);
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);

/* Ethernet Port routines */
static void eth_port_set_filter_table_entry(int table, unsigned char entry);
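/*
 * A minimal sketch (kept compiled-out) of the receive flow the comment
 * above describes: drain 'current' descriptors with eth_port_receive()
 * and hand each buffer straight back via eth_rx_return_buff() so the
 * 'used' index advances. A real consumer would pass the skb up the
 * stack and attach a freshly allocated buffer instead, as
 * mv643xx_eth_receive_queue() and mv643xx_eth_rx_refill_descs() do.
 */
#if 0
static void example_rx_drain(struct mv643xx_private *mp)
{
	struct pkt_info pkt_info;

	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
		/* ... process pkt_info.return_info (the skb) here ... */

		/* recycle the same buffer; 'used' catches up to 'curr' */
		eth_rx_return_buff(mp, &pkt_info);
	}
}
#endif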
/*
 * eth_port_init - Initialize the Ethernet port driver
 *
 * DESCRIPTION:
 *	This function prepares the ethernet port to start its activity:
 *	1) Completes the ethernet port driver struct initialization toward
 *	   the port start routine.
 *	2) Resets the device to a quiescent state in case of warm reboot.
 *	3) Enables SDMA access to all four DRAM banks as well as internal SRAM.
 *	4) Cleans MAC tables. The reset status of those tables is unknown.
 *	5) Sets PHY address.
 *	Note: Call this routine prior to the eth_port_start routine and after
 *	setting user values in the user fields of the Ethernet port control
 *	struct.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet port control struct
 *
 * OUTPUT:
 *	See description.
 *
 * RETURN:
 *	None.
 */
static void eth_port_init(struct mv643xx_private *mp)
{
	mp->rx_resource_err = 0;

	eth_port_reset(mp->port_num);

	eth_port_init_mac_tables(mp->port_num);
}
/*
 * eth_port_start - Start the Ethernet port activity.
 *
 * DESCRIPTION:
 *	This routine prepares the Ethernet port for Rx and Tx activity:
 *	1. Initialize the Tx and Rx Current Descriptor Pointer for each queue
 *	   that has been initialized a descriptor's ring (using
 *	   ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
 *	2. Initialize and enable the Ethernet configuration port by writing to
 *	   the port's configuration and command registers.
 *	3. Initialize and enable the SDMA by writing to the SDMA's
 *	   configuration and command registers. After completing these steps,
 *	   the ethernet port SDMA can start to perform Rx and Tx activities.
 *
 *	Note: Each Rx and Tx queue descriptor's list must be initialized prior
 *	to calling this function (use ether_init_tx_desc_ring for Tx queues
 *	and ether_init_rx_desc_ring for Rx queues).
 *
 * INPUT:
 *	dev - a pointer to the required interface
 *
 * OUTPUT:
 *	Ethernet port is ready to receive and transmit.
 *
 * RETURN:
 *	None.
 */
static void eth_port_start(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc_q;
	mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc_q;
	mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	eth_port_uc_addr_set(port_num, dev->dev_addr);

	/* Assign port configuration and command. */
	mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num),
			MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE);

	mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
			MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE);

	pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));

	pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS);
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
		MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
		MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
		MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;

	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);

	/* Assign port SDMA configuration */
	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
			MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/* Enable port Rx. */
	mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);

	/* Disable port bandwidth limits by clearing MTU register */
	mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);

	/* save phy settings across reset */
	mv643xx_get_settings(dev, &ethtool_cmd);
	ethernet_phy_reset(mp->port_num);
	mv643xx_set_settings(dev, &ethtool_cmd);
}
/*
 * eth_port_uc_addr_set - This function sets the port Unicast address.
 *
 * DESCRIPTION:
 *	This function sets the port Ethernet MAC address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Port number.
 *	char *		p_addr		Address to be set
 *
 * OUTPUT:
 *	Sets the MAC address low and high registers. Also calls
 *	eth_port_set_filter_table_entry() to set the unicast
 *	table with the proper information.
 *
 * RETURN:
 *	N/A.
 */
static void eth_port_uc_addr_set(unsigned int eth_port_num,
							unsigned char *p_addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
							(p_addr[3] << 0);

	mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
	mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);

	/* Accept frames of this address */
	table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num);
	eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
}
/*
 * eth_port_uc_addr_get - This function retrieves the port Unicast address
 * (MAC address) from the ethernet hw registers.
 *
 * DESCRIPTION:
 *	This function retrieves the port Ethernet MAC address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Port number.
 *	char		*MacAddr	pointer where the MAC address is stored
 *
 * OUTPUT:
 *	Copies the MAC address to the location pointed to by MacAddr
 *
 * RETURN:
 *	N/A.
 */
static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
	mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));

	p_addr[0] = (mac_h >> 24) & 0xff;
	p_addr[1] = (mac_h >> 16) & 0xff;
	p_addr[2] = (mac_h >> 8) & 0xff;
	p_addr[3] = mac_h & 0xff;
	p_addr[4] = (mac_l >> 8) & 0xff;
	p_addr[5] = mac_l & 0xff;
}
/*
 * The entries in each table are indexed by a hash of a packet's MAC
 * address. One bit in each entry determines whether the packet is
 * accepted. There are 4 entries (each 8 bits wide) in each register
 * of the table. The bits in each entry are defined as follows:
 *	0	Accept=1, Drop=0
 *	3-1	Queue			(ETH_Q0=0)
 */
static void eth_port_set_filter_table_entry(int table, unsigned char entry)
{
	unsigned int table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (entry / 4) * 4;	/* Register offset of DA table entry */
	reg_offset = entry % 4;		/* Entry offset within the register */

	/* Set "accepts frame bit" at specified table entry */
	table_reg = mv_read(table + tbl_offset);
	table_reg |= 0x01 << (8 * reg_offset);
	mv_write(table + tbl_offset, table_reg);
}
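/*
 * Example of the index arithmetic above: entry 0x1f (31) gives
 * tbl_offset = (31 / 4) * 4 = 28 and reg_offset = 31 % 4 = 3, so the
 * accept bit written is 0x01 << 24 in the register at table + 28.
 */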
1914 * eth_port_mc_addr - Multicast address settings.
1916 * The MV device supports multicast using two tables:
1917 * 1) Special Multicast Table for MAC addresses of the form
1918 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF).
1919 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1920 * Table entries in the DA-Filter table.
1921 * 2) Other Multicast Table for multicast of another type. A CRC-8bit
1922 * is used as an index to the Other Multicast Table entries in the
1923 * DA-Filter table. This function calculates the CRC-8bit value.
1924 * In either case, eth_port_set_filter_table_entry() is then called
1925 * to set to set the actual table entry.
1927 static void eth_port_mc_addr(unsigned int eth_port_num
, unsigned char *p_addr
)
1931 unsigned char crc_result
= 0;
1937 if ((p_addr
[0] == 0x01) && (p_addr
[1] == 0x00) &&
1938 (p_addr
[2] == 0x5E) && (p_addr
[3] == 0x00) && (p_addr
[4] == 0x00)) {
1939 table
= MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1941 eth_port_set_filter_table_entry(table
, p_addr
[5]);
	/* Calculate CRC-8 out of the given address */
	mac_h = (p_addr[0] << 8) | (p_addr[1]);
	mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
			(p_addr[4] << 8) | (p_addr[5] << 0);

	for (i = 0; i < 32; i++)
		mac_array[i] = (mac_l >> i) & 0x1;
	for (i = 32; i < 48; i++)
		mac_array[i] = (mac_h >> (i - 32)) & 0x1;

	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
		 mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];

	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
		 mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];

	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
		 mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];

	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
		 mac_array[3]  ^ mac_array[2]  ^ mac_array[1];

	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
		 mac_array[3]  ^ mac_array[2];

	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
		 mac_array[4]  ^ mac_array[3];

	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
		 mac_array[4];

	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
		 mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];

	for (i = 0; i < 8; i++)
		crc_result = crc_result | (crc[i] << i);

	table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
	eth_port_set_filter_table_entry(table, crc_result);
}
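
/*
 * Illustrative note (sketch, not driver code): crc_result above is an
 * 8-bit hash, so it selects one of 256 one-byte entries in the "other
 * multicast" filter table.  Each 32-bit table register holds four such
 * entries, with the accept flag in bit 0 of every byte; this is also
 * why the promiscuous path below floods both tables with 0x01010101.
 * The hypothetical helper shows only the addressing arithmetic.
 */
static inline void filter_entry_addr_sketch(unsigned char hash,
				unsigned int *reg_offset, unsigned int *bit)
{
	*reg_offset = (hash / 4) * 4;	/* four 1-byte entries per register */
	*bit = (hash % 4) * 8;		/* accept flag = bit 0 of that byte */
}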
/*
 * Set the entire multicast list based on dev->mc_list.
 */
static void eth_port_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list	*mc_list;
	int			i;
	int			table_index;
	struct mv643xx_private	*mp = netdev_priv(dev);
	unsigned int		eth_port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	 Accept=1, Drop=0
			 * 3-1	 Queue ETH_Q0=0
			 * 7-4	 Reserved = 0;
			 */
			mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	 Accept=1, Drop=0
			 * 3-1	 Queue ETH_Q0=0
			 * 7-4	 Reserved = 0;
			 */
			mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
				(eth_port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one... */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
}
/*
 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * DESCRIPTION:
 *	Go through all the DA filter tables (Unicast, Special Multicast &
 *	Other Multicast) and set each entry to 0.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Multicast and Unicast packets are rejected.
 *
 * RETURN:
 *	None.
 */
static void eth_port_init_mac_tables(unsigned int eth_port_num)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
					(eth_port_num) + table_index, 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
					(eth_port_num) + table_index, 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
					(eth_port_num) + table_index, 0);
	}
}
/*
 * eth_clear_mib_counters - Clear all MIB counters
 *
 * DESCRIPTION:
 *	This function clears all MIB counters of a specific ethernet port.
 *	A read from the MIB counter will reset the counter.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	After reading all MIB counters, the counters reset.
 *
 * RETURN:
 *	None.
 */
static void eth_clear_mib_counters(unsigned int eth_port_num)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
									i += 4)
		mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
}
static inline u32 read_mib(struct mv643xx_private *mp, int offset)
{
	return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
}

static void eth_update_mib_counters(struct mv643xx_private *mp)
{
	struct mv643xx_mib_counters *p = &mp->mib_counters;
	int offset;

	p->good_octets_received +=
			read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
	p->good_octets_received +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;

	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
			offset += 4)
		*(u32 *)((char *)p + offset) += read_mib(mp, offset);

	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
	p->good_octets_sent +=
			(u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;

	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
			offset <= ETH_MIB_LATE_COLLISION;
			offset += 4)
		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
}
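
/*
 * Layout note: the two loops above accumulate each hardware register
 * into struct mv643xx_mib_counters at the same byte offset, so that
 * struct must mirror the MIB register block member for member.  Only
 * good_octets_received and good_octets_sent are 64-bit and need their
 * LOW/HIGH register halves combined into a u64 explicitly.
 */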
/*
 * ethernet_phy_detect - Detect whether a phy is present
 *
 * DESCRIPTION:
 *	This function tests whether there is a PHY present on
 *	the specified port.
 *
 * INPUT:
 *	unsigned int	port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	0 on success
 *	-ENODEV on failure
 */
static int ethernet_phy_detect(unsigned int port_num)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;		/* change didn't take */

	phy_reg_data0 ^= 0x1000;	/* restore auto_neg */
	eth_port_write_smi_reg(port_num, 0, phy_reg_data0);

	return 0;
}
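
/*
 * How the probe above works: bit 12 (0x1000) of PHY register 0 is the
 * IEEE 802.3 auto-negotiation enable bit, which a responding PHY
 * latches.  Toggling it and reading it back therefore distinguishes a
 * live PHY from a floating SMI bus, and the second toggle restores the
 * original link configuration before returning success.
 */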
/*
 * ethernet_phy_get - Get the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine returns the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	PHY address.
 */
static int ethernet_phy_get(unsigned int eth_port_num)
{
	unsigned int reg_data;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);

	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
}
/*
 * ethernet_phy_set - Set the ethernet port PHY address.
 *
 * DESCRIPTION:
 *	This routine sets the given ethernet port PHY address.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	int		phy_addr	PHY address.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */
static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * eth_port_num;

	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
}
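
/*
 * Layout note for ethernet_phy_get()/ethernet_phy_set() above:
 * MV643XX_ETH_PHY_ADDR_REG packs one 5-bit PHY address per port, with
 * port N occupying bits [5N+4:5N].  For example, port 2's address sits
 * in bits 14:10 and reads back as (reg_data >> 10) & 0x1f.
 */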
/*
 * ethernet_phy_reset - Reset Ethernet port PHY.
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to reset the ethernet port PHY.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	The PHY is reset.
 *
 * RETURN:
 *	None.
 */
static void ethernet_phy_reset(unsigned int eth_port_num)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);

	/* wait for PHY to come out of reset */
	do {
		udelay(1);
		eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	} while (phy_reg_data & 0x8000);
}
static void mv643xx_eth_port_enable_tx(unsigned int port_num,
					unsigned int queues)
{
	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
}

static void mv643xx_eth_port_enable_rx(unsigned int port_num,
					unsigned int queues)
{
	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
}
static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
{
	u32 queues;

	/* Stop Tx port activity. Check port Tx activity. */
	queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
							& 0xFF;
	if (queues) {
		/* Issue stop command for active queues only */
		mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
							(queues << 8));

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
		while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
							& 0xFF)
			udelay(PHY_WAIT_MICRO_SECONDS);

		/* Wait for Tx FIFO to empty */
		while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
							ETH_PORT_TX_FIFO_EMPTY)
			udelay(PHY_WAIT_MICRO_SECONDS);
	}

	return queues;
}

static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
{
	u32 queues;

	/* Stop Rx port activity. Check port Rx activity. */
	queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
							& 0xFF;
	if (queues) {
		/* Issue stop command for active queues only */
		mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
							(queues << 8));

		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
		while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
							& 0xFF)
			udelay(PHY_WAIT_MICRO_SECONDS);
	}

	return queues;
}
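
/*
 * Register layout assumed by the four queue helpers above: in the
 * Tx/Rx queue command registers, bits 7:0 issue per-queue "enable"
 * commands and bits 15:8 the matching "disable" commands.  That is why
 * the stop sequence writes the active-queue mask shifted left by 8 and
 * then polls the low byte until the hardware reports all queues idle.
 */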
/*
 * eth_port_reset - Reset Ethernet port
 *
 * DESCRIPTION:
 *	This routine resets the chip by aborting any SDMA engine activity and
 *	clearing the MIB counters. The Receiver and the Transmit unit are in
 *	idle state after this command is performed and the port is disabled.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *
 * OUTPUT:
 *	Channel activity is halted.
 *
 * RETURN:
 *	None.
 */
static void eth_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	mv643xx_eth_port_disable_tx(port_num);
	mv643xx_eth_port_disable_rx(port_num);

	/* Clear all MIB counters */
	eth_clear_mib_counters(port_num);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
	reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE		|
			MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL	|
			MV643XX_ETH_FORCE_LINK_PASS);
	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
}
/*
 * eth_port_read_smi_reg - Read PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform PHY register read.
 *
 * INPUT:
 *	unsigned int	port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	*value		Register value buffer.
 *
 * OUTPUT:
 *	Write the value of a specified PHY register into given buffer.
 *
 * RETURN:
 *	None. If the PHY stays busy or the read data never becomes valid,
 *	the routine logs a timeout and returns without updating *value.
 */
static void eth_port_read_smi_reg(unsigned int port_num,
				unsigned int phy_reg, unsigned int *value)
{
	int phy_addr = ethernet_phy_get(port_num);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG,
		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);

	/* now wait for the data to be valid */
	for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY read timeout, port %d\n",
								port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
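
/*
 * SMI command word used by this read routine (and the write routine
 * below): the PHY address occupies bits 20:16, the PHY register number
 * bits 25:21, ETH_SMI_OPCODE_READ/WRITE selects the cycle type, and
 * the low 16 bits carry the data.  ETH_SMI_BUSY and ETH_SMI_READ_VALID
 * are the status flags polled by the wait loops.
 */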
/*
 * eth_port_write_smi_reg - Write to PHY registers
 *
 * DESCRIPTION:
 *	This routine utilizes the SMI interface to interact with the PHY in
 *	order to perform writes to PHY registers.
 *
 * INPUT:
 *	unsigned int	eth_port_num	Ethernet Port number.
 *	unsigned int	phy_reg		PHY register address offset.
 *	unsigned int	value		Register value.
 *
 * OUTPUT:
 *	Write the given value to the specified PHY register.
 *
 * RETURN:
 *	None. If the PHY stays busy, the routine logs a timeout and the
 *	write is skipped.
 */
static void eth_port_write_smi_reg(unsigned int eth_port_num,
				unsigned int phy_reg, unsigned int value)
{
	int phy_addr;
	int i;
	unsigned long flags;

	phy_addr = ethernet_phy_get(eth_port_num);

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk("mv643xx PHY busy timeout, port %d\n",
								eth_port_num);
			goto out;
		}
		udelay(PHY_WAIT_MICRO_SECONDS);
	}

	mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
out:
	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
}
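
/*
 * Hedged usage sketch (hypothetical helper, not part of the driver):
 * the two SMI routines above compose into a PHY register
 * read-modify-write; e.g. 'bits' could be 0x1000 to turn on
 * auto-negotiation in PHY register 0.
 */
static void eth_port_smi_set_bits_sketch(unsigned int port_num,
				unsigned int phy_reg, unsigned int bits)
{
	unsigned int val = 0;	/* stays 0 if the read below times out */

	/* read current value, OR in the requested bits, write it back */
	eth_port_read_smi_reg(port_num, phy_reg, &val);
	eth_port_write_smi_reg(port_num, phy_reg, val | bits);
}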
/*
 * Wrappers for MII support library.
 */
static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
{
	unsigned int val;
	struct mv643xx_private *mp = netdev_priv(dev);

	eth_port_read_smi_reg(mp->port_num, location, &val);

	return val;
}

static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	eth_port_write_smi_reg(mp->port_num, location, val);
}
/*
 * eth_port_receive - Get received information from Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns the received data to the caller. There is no
 *	data copying during routine operation. All information is returned
 *	using pointer to packet information struct passed from the caller.
 *	If the routine exhausts Rx ring resources then the resource error flag
 *	is set.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	User packet buffer.
 *
 * OUTPUT:
 *	Rx ring current and used indexes are updated.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *	ETH_QUEUE_FULL if Rx ring resources are exhausted.
 *	ETH_END_OF_JOB if there is no received data.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
	volatile struct eth_rx_desc *p_rx_desc;
	unsigned int command_status;
	unsigned long flags;

	/* Do not process Rx ring in case of Rx ring resource error */
	if (mp->rx_resource_err)
		return ETH_QUEUE_FULL;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get the Rx Desc ring 'curr and 'used' indexes */
	rx_curr_desc = mp->rx_curr_desc_q;
	rx_used_desc = mp->rx_used_desc_q;

	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];

	/* The following parameters are used to save readings from memory */
	command_status = p_rx_desc->cmd_sts;
	rmb();

	/* Nothing to receive... */
	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
		spin_unlock_irqrestore(&mp->lock, flags);
		return ETH_END_OF_JOB;
	}

	p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
	p_pkt_info->cmd_sts = command_status;
	p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
	p_pkt_info->l4i_chk = p_rx_desc->buf_size;

	/*
	 * Clean the return info field to indicate that the
	 * packet has been moved to the upper layers
	 */
	mp->rx_skb[rx_curr_desc] = NULL;

	/* Update current index in data structure */
	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
	mp->rx_curr_desc_q = rx_next_curr_desc;

	/* Rx descriptors exhausted. Set the Rx ring resource error flag */
	if (rx_next_curr_desc == rx_used_desc)
		mp->rx_resource_err = 1;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
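
/*
 * Ring bookkeeping note: rx_curr_desc_q chases rx_used_desc_q around
 * the ring.  When advancing curr would make it equal to used, every
 * descriptor is held by software, so rx_resource_err is set and
 * eth_port_receive() backs off with ETH_QUEUE_FULL until a buffer is
 * handed back via eth_rx_return_buff() below.
 */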
/*
 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
 *
 * DESCRIPTION:
 *	This routine returns a Rx buffer back to the Rx ring. It retrieves the
 *	next 'used' descriptor and attaches the returned buffer to it.
 *	In case the Rx ring was in "resource error" condition, where there are
 *	no available Rx resources, the function resets the resource error flag.
 *
 * INPUT:
 *	struct mv643xx_private	*mp		Ethernet Port Control struct.
 *	struct pkt_info		*p_pkt_info	Information on returned buffer.
 *
 * OUTPUT:
 *	New available Rx resource in Rx descriptor ring.
 *
 * RETURN:
 *	ETH_ERROR in case the routine can not access Rx desc ring.
 *	ETH_OK otherwise.
 */
static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
						struct pkt_info *p_pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct eth_rx_desc *p_used_rx_desc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc_q;
	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];

	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;

	/* Flush the write pipe */

	/* Return the descriptor to DMA ownership */
	wmb();
	p_used_rx_desc->cmd_sts =
			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;

	/* Any Rx return cancels the Rx resource error status */
	mp->rx_resource_err = 0;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}
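
/*
 * Hedged usage sketch (hypothetical helper, not part of the driver):
 * the Rx fast path pairs the two ring routines above.
 * eth_port_receive() pops a filled buffer whose skb goes up the stack,
 * and a freshly allocated skb is handed back to the hardware through
 * eth_rx_return_buff().  The DMA device pointer and sizing details are
 * simplified here.
 */
static void eth_rx_refill_one_sketch(struct mv643xx_private *mp,
					struct sk_buff *skb, int size)
{
	struct pkt_info pkt_info;

	pkt_info.byte_cnt = size;		/* buffer size for the desc */
	pkt_info.buf_ptr = dma_map_single(NULL, skb->data, size,
							DMA_FROM_DEVICE);
	pkt_info.return_info = skb;		/* handed back on receive */
	if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK)
		printk(KERN_ERR "%s: could not return rx buffer to ring\n",
							mv643xx_driver_name);
}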
/************* Begin ethtool support *************************/

struct mv643xx_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
			offsetof(struct mv643xx_private, m)
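
/*
 * MV643XX_STAT(m) expands to the two trailing initializers of struct
 * mv643xx_stats: the member's size (4 or 8 bytes, which tells
 * mv643xx_get_ethtool_stats() below whether to fetch a u32 or a u64)
 * and its byte offset within struct mv643xx_private.
 */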
static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
	{ "rx_packets", MV643XX_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_STAT(mib_counters.late_collision) },
};
#define MV643XX_STATS_LEN	\
	sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)

static void mv643xx_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
	drvinfo->n_stats = MV643XX_STATS_LEN;
}

static int mv643xx_get_stats_count(struct net_device *netdev)
{
	return MV643XX_STATS_LEN;
}
static void mv643xx_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_private *mp = netdev_priv(netdev);
	int i;

	eth_update_mib_counters(mp);

	for (i = 0; i < MV643XX_STATS_LEN; i++) {
		char *p = (char *)mp + mv643xx_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}

static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
				uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MV643XX_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}

static int mv643xx_eth_nway_restart(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}

static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}
static const struct ethtool_ops mv643xx_ethtool_ops = {
	.get_settings		= mv643xx_get_settings,
	.set_settings		= mv643xx_set_settings,
	.get_drvinfo		= mv643xx_get_drvinfo,
	.get_link		= mv643xx_eth_get_link,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_get_strings,
	.get_stats_count	= mv643xx_get_stats_count,
	.get_ethtool_stats	= mv643xx_get_ethtool_stats,
	.nway_reset		= mv643xx_eth_nway_restart,
};

/************* End ethtool support *************************/