// SPDX-License-Identifier: GPL-2.0-or-later

/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
 * Copyright (C) 2004 Advanced Micro Devices
 *
 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.[ pcnet32.c ]
 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * AMD8111 based 10/100 Ethernet Controller Driver.
 *
 * Revision History:
 *	1. Dynamic interrupt coalescing.
 *	2. Removed prev_stats.
 *	4. Dynamic IPG support
 *
 *	1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
 *	2. Bug fix: Fixed VLAN support failure.
 *	3. Bug fix: Fixed receive interrupt coalescing bug.
 *	4. Dynamic IPG support is disabled by default.
 *
 *	1. Bug fix: Fixed failure to close the interface if SMP is enabled.
 *
 *	1. Added set_mac_address routine for bonding driver support.
 *	2. Tested the driver for bonding support
 *	3. Bug fix: Fixed mismatch in actual receive buffer length and length
 *	   indicated to the hardware.
 *	4. Modified amd8111e_rx() routine to receive all the received packets
 *	   in the first interrupt.
 *	5. Bug fix: Corrected rx_errors reported in get_stats() function.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
#endif

#include "amd8111e.h"
#define MODULE_NAME	"amd8111e"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
/* This function will read the PHY registers. */
static int amd8111e_read_phy(struct amd8111e_priv *lp,
			     int phy_id, int reg, u32 *val)
{
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;
	unsigned int repeat = REPEAT_CNT;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);  /* It takes 30 us to read/write data */
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
	if (reg_val & PHY_RD_ERR)
		goto err_phy_read;

	*val = reg_val & 0xffff;
	return 0;

err_phy_read:
	*val = 0;
	return -EINVAL;
}
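
/* Note on the PHY_ACCESS word layout as used by the PHY helpers in this
 * file (inferred from the shifts and masks above, not quoted from the
 * register specification): the PHY address occupies bits 21 and up, the
 * register number bits 16-20, and the data the low 16 bits, while
 * PHY_CMD_ACTIVE flags a command still in flight and PHY_RD_ERR a
 * failed read.
 */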
/* This function will write into PHY registers. */
static int amd8111e_write_phy(struct amd8111e_priv *lp,
			      int phy_id, int reg, u32 val)
{
	unsigned int repeat = REPEAT_CNT;
	void __iomem *mmio = lp->mmio;
	unsigned int reg_val;

	reg_val = readl(mmio + PHY_ACCESS);
	while (reg_val & PHY_CMD_ACTIVE)
		reg_val = readl(mmio + PHY_ACCESS);

	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
	       ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);

	do {
		reg_val = readl(mmio + PHY_ACCESS);
		udelay(30);  /* It takes 30 us to read/write the data */
	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));

	if (reg_val & PHY_RD_ERR)
		goto err_phy_write;

	return 0;

err_phy_write:
	return -EINVAL;
}
/* This is the mii register read function provided to the mii interface. */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}
/* This is the mii register write function provided to the mii interface. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}
/* This function will set PHY speed. During initialization sets
 * the original speed to 100 full
 */
static void amd8111e_set_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 bmcr, advert, tmp;

	/* Determine mii register values to set the speed */
	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	switch (lp->ext_phy_option) {
	default:
	case SPEED_AUTONEG: /* advertise all values */
		tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
			ADVERTISE_100HALF | ADVERTISE_100FULL);
		break;
	case SPEED10_HALF:
		tmp |= ADVERTISE_10HALF;
		break;
	case SPEED10_FULL:
		tmp |= ADVERTISE_10FULL;
		break;
	case SPEED100_HALF:
		tmp |= ADVERTISE_100HALF;
		break;
	case SPEED100_FULL:
		tmp |= ADVERTISE_100FULL;
		break;
	}

	if (advert != tmp)
		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
	/* Restart auto negotiation */
	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
}
/* This function will unmap skb->data space and will free
 * all transmit and receive skbuffs.
 */
static int amd8111e_free_skbs(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *rx_skbuff;
	int i;

	/* Freeing transmit skbs */
	for (i = 0; i < NUM_TX_BUFFERS; i++) {
		if (lp->tx_skbuff[i]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[i],
					 lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
			lp->tx_dma_addr[i] = 0;
		}
	}
	/* Freeing previously allocated receive buffers */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		rx_skbuff = lp->rx_skbuff[i];
		if (rx_skbuff) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->rx_dma_addr[i],
					 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_skbuff[i]);
			lp->rx_skbuff[i] = NULL;
			lp->rx_dma_addr[i] = 0;
		}
	}

	return 0;
}
/* This will set the receive buffer length corresponding
 * to the mtu size of the network interface.
 */
static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int mtu = dev->mtu;

	if (mtu > ETH_DATA_LEN) {
		/* MTU + ethernet header + FCS
		 * + optional VLAN tag + skb reserve space 2
		 */
		lp->rx_buff_len = mtu + ETH_HLEN + 10;
		lp->options |= OPTION_JUMBO_ENABLE;
	} else {
		lp->rx_buff_len = PKT_BUFF_SZ;
		lp->options &= ~OPTION_JUMBO_ENABLE;
	}
}
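
/* Worked example (illustrative only): with a jumbo MTU of 9000 the code
 * above picks rx_buff_len = 9000 + ETH_HLEN + 10 = 9024 bytes, which
 * covers the 14-byte Ethernet header, 4-byte FCS, an optional 4-byte
 * VLAN tag and the 2-byte skb_reserve() alignment pad.
 */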
/* This function will free all the previously allocated buffers,
 * determine new receive buffer length and will allocate new receive buffers.
 * This function also allocates and initializes both the transmitter
 * and receive hardware descriptors.
 */
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;

	if (lp->opened)
		/* Free previously allocated transmit and receive skbs */
		amd8111e_free_skbs(dev);

	else {
		/* allocate the tx and rx descriptors */
		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->tx_ring)
			goto err_no_mem;

		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->rx_ring)
			goto err_free_tx_ring;
	}

	/* Set new receive buff size */
	amd8111e_set_rx_buff_len(dev);

	/* Allocating receive skbs */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {

		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			/* Release previously allocated skbs */
			for (--i; i >= 0; i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		skb_reserve(lp->rx_skbuff[i], 2);
	}
	/* Initializing receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    lp->rx_skbuff[i]->data,
						    lp->rx_buff_len - 2,
						    DMA_FROM_DEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	/* Initializing transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			  lp->rx_ring, lp->rx_ring_dma_addr);

err_free_tx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			  lp->tx_ring, lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}
/* This function will set the interrupt coalescing according
 * to the input arguments
 */
static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
{
	unsigned int timeout;
	unsigned int event_count;

	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;

	switch (cmod) {
	case RX_INTR_COAL:
		timeout = coal_conf->rx_timeout;
		event_count = coal_conf->rx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0|STINTEN, mmio+INTEN0);
		writel((u32)DLY_INT_A_R0 | (event_count << 16) |
			timeout, mmio + DLY_INT_A);
		break;

	case TX_INTR_COAL:
		timeout = coal_conf->tx_timeout;
		event_count = coal_conf->tx_event_count;
		if (timeout > MAX_TIMEOUT ||
		    event_count > MAX_EVENT_COUNT)
			return -EINVAL;

		timeout = timeout * DELAY_TIMER_CONV;
		writel(VAL0 | STINTEN, mmio + INTEN0);
		writel((u32)DLY_INT_B_T0 | (event_count << 16) |
		       timeout, mmio + DLY_INT_B);
		break;

	case DISABLE_COAL:
		writel(0, mmio + STVAL);
		writel(STINTEN, mmio + INTEN0);
		writel(0, mmio + DLY_INT_B);
		writel(0, mmio + DLY_INT_A);
		break;
	case ENABLE_COAL:
		/* Start the timer */
		writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
		writel(VAL0 | STINTEN, mmio + INTEN0);
		break;
	default:
		break;
	}
	return 0;
}
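
/* Note (derived from the writes above, not from the register spec): the
 * event count is shifted into the upper half of DLY_INT_A/DLY_INT_B and
 * the timeout, scaled by DELAY_TIMER_CONV, sits in the low bits;
 * DISABLE_COAL clears both delay registers while ENABLE_COAL only
 * (re)starts the 0.5 s software timer used for recalculation.
 */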
/* This function initializes the device registers and starts the device. */
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	/* enable the port manager and set auto negotiation always */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	amd8111e_set_ext_phy(dev);

	/* set control registers */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	/* enable interrupt */
	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
	       APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
	       SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	/* set default IPG to 96 */
	writew((u32)DEFAULT_IPG, mmio + IPG);
	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2|JUMBO, mmio + CMD3);
		writel(REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	/* Setting the MAC address to the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	/* Enable interrupt coalesce */
	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		netdev_info(dev, "Interrupt Coalescing Enabled.\n");
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	/* set RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(mmio + CMD0);
	return 0;
}
/* This function clears the necessary device registers. */
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
	writew(0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);

	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* Clear CMD0 */
	writel(CMD0_CLEAR, mmio + CMD0);

	/* Clear CMD2 */
	writel(CMD2_CLEAR, mmio + CMD2);

	/* Clear CMD7 */
	writel(CMD7_CLEAR, mmio + CMD7);

	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);

	/* Clear INT0  write 1 to clear register */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* Clear STVAL */
	writel(0x0, mmio + STVAL);

	/* Clear INTEN0 */
	writel(INTEN0_CLEAR, mmio + INTEN0);

	/* Clear LADRF */
	writel(0x0, mmio + LADRF);

	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
	writel(0x80010, mmio + SRAM_SIZE);

	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);

	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);

	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* Clear MIB */
	writew(MIB_CLEAR, mmio + MIB_ADDR);

	/* Clear LARF */
	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	/* SRAM_SIZE register */
	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	/* Set default value to CTRL1 Register */
	writel(CTRL1_DEFAULT, mmio + CTRL1);

	/* To avoid PCI posting bug */
	readl(mmio + CMD2);
}
/* This function disables the interrupt and clears all the pending
 * interrupts in INT0.
 */
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 intr0;

	/* Disable interrupt */
	writel(INTREN, lp->mmio + CMD0);

	/* Clear INT0 */
	intr0 = readl(lp->mmio + INT0);
	writel(intr0, lp->mmio + INT0);

	/* To avoid PCI posting bug */
	readl(lp->mmio + INT0);
}
/* This function stops the chip. */
static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
	writel(RUN, lp->mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(lp->mmio + CMD0);
}
/* This function frees the transmitter and receiver descriptor rings. */
static void amd8111e_free_ring(struct amd8111e_priv *lp)
{
	/* Free transmit and receive descriptor rings */
	if (lp->rx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
				  lp->rx_ring, lp->rx_ring_dma_addr);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		dma_free_coherent(&lp->pci_dev->dev,
				  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
				  lp->tx_ring, lp->tx_ring_dma_addr);
		lp->tx_ring = NULL;
	}
}
/* This function will free all the transmit skbs that are actually
 * transmitted by the device. It will check the ownership of the
 * skb before freeing the skb.
 */
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	int status;
	/* Complete all the transmit packet */
	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* We must free the original skb */
		if (lp->tx_skbuff[tx_index]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;
		/*COAL update tx coalescing parameters */
		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		if (netif_queue_stopped(dev) &&
		    lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			/* The ring is no longer full, clear tbusy. */
			/* lp->tx_full = 0; */
			netif_wake_queue(dev);
		}
	}
	return 0;
}
/* This function handles the driver receive operation in polling mode */
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if (status & OWN_BIT)
			break;

		/* There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with
		 * full-sized * buffers it's possible for a
		 * jabber packet to use two buffers, with only
		 * the last correctly noting the error.
		 */
		if (status & ERR_BIT) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* check for STP and ENP */
		if (!((status & STP_BIT) && (status & ENP_BIT))) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/* MAC will strip vlan tag */
		if (vtag != 0)
			min_pkt_len = MIN_PKT_LEN - 4;
		else
#endif
			min_pkt_len = MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fail,
			 * ignore that pkt and go to next one
			 */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}

		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
		skb_put(skb, pkt_len);
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
							   new_skb->data,
							   lp->rx_buff_len - 2,
							   DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		if (vtag == TT_VLAN_TAGGED) {
			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
		napi_gro_receive(napi, skb);
		/* COAL update rx coalescing parameters */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		num_rx_pkt++;

err_next_pkt:
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

		/* Receive descriptor is empty now */
		spin_lock_irqsave(&lp->lock, flags);
		writel(VAL0|RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return num_rx_pkt;
}
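
/* NAPI flow in brief: amd8111e_interrupt() masks RINTEN0 and schedules
 * this poller; once fewer than "budget" packets were processed and
 * napi_complete_done() succeeds, receive interrupts are re-enabled
 * (VAL0 | RINTEN0) and receive DMA is kicked (VAL2 | RDMD0) under
 * lp->lock.
 */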
/* This function will indicate the link status to the kernel. */
static int amd8111e_link_change(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int status0, speed;

	/* read the link change */
	status0 = readl(lp->mmio + STAT0);

	if (status0 & LINK_STATS) {
		if (status0 & AUTONEG_COMPLETE)
			lp->link_config.autoneg = AUTONEG_ENABLE;
		else
			lp->link_config.autoneg = AUTONEG_DISABLE;

		if (status0 & FULL_DPLX)
			lp->link_config.duplex = DUPLEX_FULL;
		else
			lp->link_config.duplex = DUPLEX_HALF;
		speed = (status0 & SPEED_MASK) >> 7;
		if (speed == PHY_SPEED_10)
			lp->link_config.speed = SPEED_10;
		else if (speed == PHY_SPEED_100)
			lp->link_config.speed = SPEED_100;

		netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
			    (lp->link_config.speed == SPEED_100) ?
							"100" : "10",
			    (lp->link_config.duplex == DUPLEX_FULL) ?
							"Full" : "Half");

		netif_carrier_on(dev);
	} else {
		lp->link_config.speed = SPEED_INVALID;
		lp->link_config.duplex = DUPLEX_INVALID;
		lp->link_config.autoneg = AUTONEG_INVALID;
		netdev_info(dev, "Link is Down.\n");
		netif_carrier_off(dev);
	}

	return 0;
}
/* This function reads the mib counters. */
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int status;
	unsigned int data;
	unsigned int repeat = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		status = readw(mmio + MIB_ADDR);
		udelay(2);	/* controller takes MAX 2 us to get mib data */
	} while (--repeat && (status & MIB_CMD_ACTIVE));

	data = readl(mmio + MIB_DATA);
	return data;
}
/* This function reads the mib registers and returns the hardware statistics.
 * It updates previous internal driver statistics with new values.
 */
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	spin_lock_irqsave(&lp->lock, flags);

	/* stats.rx_packets */
	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	/* stats.tx_packets */
	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	/* stats.rx_bytes */
	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	/* stats.tx_bytes */
	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	/* stats.rx_errors */
	/* hw errors + errors driver reported */
	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
				amd8111e_read_mib(mmio, rcv_fragments)+
				amd8111e_read_mib(mmio, rcv_jabbers)+
				amd8111e_read_mib(mmio, rcv_alignment_errors)+
				amd8111e_read_mib(mmio, rcv_fcs_errors)+
				amd8111e_read_mib(mmio, rcv_miss_pkts)+
				lp->drv_rx_errors;

	/* stats.tx_errors */
	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.rx_dropped*/
	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_dropped*/
	new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.multicast*/
	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	/* stats.collisions*/
	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	/* stats.rx_length_errors*/
	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	/* stats.rx_over_errors*/
	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_crc_errors*/
	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	/* stats.rx_frame_errors*/
	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_aborted_errors*/
	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	/* stats.tx_carrier_errors*/
	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	/* stats.tx_fifo_errors*/
	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.tx_window_errors*/
	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	/* Reset the mibs for collecting new statistics */
	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}
/* This function recalculates the interrupt coalescing mode on every interrupt
 * according to the datarate and the packet rate.
 */
static int amd8111e_calc_coalesce(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
	int tx_pkt_rate;
	int rx_pkt_rate;
	int tx_data_rate;
	int rx_data_rate;
	int rx_pkt_size;
	int tx_pkt_size;

	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
	coal_conf->tx_prev_packets = coal_conf->tx_packets;

	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
	coal_conf->tx_prev_bytes = coal_conf->tx_bytes;

	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
	coal_conf->rx_prev_packets = coal_conf->rx_packets;

	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
	coal_conf->rx_prev_bytes = coal_conf->rx_bytes;

	if (rx_pkt_rate < 800) {
		if (coal_conf->rx_coal_type != NO_COALESCE) {

			coal_conf->rx_timeout = 0x0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev, RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
		}
	} else {

		rx_pkt_size = rx_data_rate/rx_pkt_rate;
		if (rx_pkt_size < 128) {
			if (coal_conf->rx_coal_type != NO_COALESCE) {

				coal_conf->rx_timeout = 0;
				coal_conf->rx_event_count = 0;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = NO_COALESCE;
			}

		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {

			if (coal_conf->rx_coal_type != LOW_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = LOW_COALESCE;
			}
		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {

			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
				coal_conf->rx_timeout = 1;
				coal_conf->rx_event_count = 4;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = MEDIUM_COALESCE;
			}

		} else if (rx_pkt_size >= 1024) {

			if (coal_conf->rx_coal_type != HIGH_COALESCE) {
				coal_conf->rx_timeout = 2;
				coal_conf->rx_event_count = 3;
				amd8111e_set_coalesce(dev, RX_INTR_COAL);
				coal_conf->rx_coal_type = HIGH_COALESCE;
			}
		}
	}
	/* NOW FOR TX INTR COALESC */
	if (tx_pkt_rate < 800) {
		if (coal_conf->tx_coal_type != NO_COALESCE) {

			coal_conf->tx_timeout = 0x0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev, TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
		}
	} else {

		tx_pkt_size = tx_data_rate/tx_pkt_rate;
		if (tx_pkt_size < 128) {

			if (coal_conf->tx_coal_type != NO_COALESCE) {

				coal_conf->tx_timeout = 0;
				coal_conf->tx_event_count = 0;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = NO_COALESCE;
			}

		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {

			if (coal_conf->tx_coal_type != LOW_COALESCE) {
				coal_conf->tx_timeout = 1;
				coal_conf->tx_event_count = 2;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = LOW_COALESCE;
			}
		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {

			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
				coal_conf->tx_timeout = 2;
				coal_conf->tx_event_count = 5;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = MEDIUM_COALESCE;
			}
		} else if (tx_pkt_size >= 1024) {
			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
				coal_conf->tx_timeout = 4;
				coal_conf->tx_event_count = 8;
				amd8111e_set_coalesce(dev, TX_INTR_COAL);
				coal_conf->tx_coal_type = HIGH_COALESCE;
			}
		}
	}
	return 0;
}
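
/* Summary of the adaptive policy above: below 800 packets/s coalescing
 * is turned off; otherwise the average packet size selects the level,
 * roughly <128 bytes -> none, 128-511 -> LOW, 512-1023 -> MEDIUM and
 * >=1024 -> HIGH, with slightly larger timeout/event-count values on
 * the transmit side.
 */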
/* This is device interrupt function. It handles transmit,
 * receive, link change and hardware timer interrupts.
 */
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned int intr0, intren0;
	unsigned int handled = 1;

	spin_lock(&lp->lock);

	/* disabling interrupt */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status */
	intr0 = readl(mmio + INT0);
	intren0 = readl(mmio + INTEN0);

	/* Process all the INT event until INTR bit is clear. */

	if (!(intr0 & INTR)) {
		handled = 0;
		goto err_no_interrupt;
	}

	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	if (intr0 & RINT0) {
		if (napi_schedule_prep(&lp->napi)) {
			/* Disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
			/* Schedule a polling routine */
			__napi_schedule(&lp->napi);
		} else if (intren0 & RINTEN0) {
			netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
			/* Fix by disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
		}
	}

	/* Check if Transmit Interrupt has occurred. */
	if (intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* Check if Hardware Timer Interrupt has occurred. */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	writel(VAL0 | INTREN, mmio + CMD0);

	spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;
	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
#endif
/* This function closes the network interface and updates
 * the statistics so that most recent statistics will be
 * available after the interface is down.
 */
static int amd8111e_close(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	netif_stop_queue(dev);

	napi_disable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);

	/* Free transmit and receive skbs */
	amd8111e_free_skbs(lp->amd8111e_net_dev);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* Delete ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);
	amd8111e_free_ring(lp);

	/* Update the statistics before closing */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
/* This function opens a new interface. It requests irq for the device,
 * initializes the device, buffers and descriptors, and starts the device.
 */
static int amd8111e_open(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
					 IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if (amd8111e_restart(dev)) {
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}
	/* Start ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		add_timer(&lp->ipg_data.ipg_timer);
		netdev_info(dev, "Dynamic IPG Enabled\n");
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
/* This function checks if there is any transmit descriptors
 * available to queue more packets.
 */
static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
{
	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
	if (lp->tx_skbuff[tx_index])
		return -1;
	else
		return 0;
}
/* This function will queue the transmit packets to the
 * descriptors and will trigger the send operation. It also
 * initializes the transmit descriptors with buffer physical address,
 * byte count, ownership to hardware etc.
 */
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	if (skb_vlan_tag_present(skb)) {
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
				cpu_to_le16(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
				cpu_to_le16(skb_vlan_tag_get(skb));
	}
#endif
	lp->tx_dma_addr[tx_index] =
		dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
			       DMA_TO_DEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
		cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/*  Set FCS and LTINT bits */
	lp->tx_ring[tx_index].tx_flags |=
		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel(VAL1 | TDMD0, lp->mmio + CMD0);
	writel(VAL2 | RDMD0, lp->mmio + CMD0);

	if (amd8111e_tx_queue_avail(lp) < 0) {
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
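
/* Descriptor handoff: the byte count, optional VLAN tag words and the
 * mapped buffer address are written first, and OWN_BIT is set only in
 * the final tx_flags update, so the MAC only picks up a fully
 * initialized descriptor when the TDMD0 doorbell is written.
 */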
/* This function returns all the memory mapped registers of the device. */
static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
	void __iomem *mmio = lp->mmio;
	/* Read only necessary registers */
	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
	buf[1] = readl(mmio + XMT_RING_LEN0);
	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
	buf[3] = readl(mmio + RCV_RING_LEN0);
	buf[4] = readl(mmio + CMD0);
	buf[5] = readl(mmio + CMD2);
	buf[6] = readl(mmio + CMD3);
	buf[7] = readl(mmio + CMD7);
	buf[8] = readl(mmio + INT0);
	buf[9] = readl(mmio + INTEN0);
	buf[10] = readl(mmio + LADRF);
	buf[11] = readl(mmio + LADRF+4);
	buf[12] = readl(mmio + STAT0);
}
/* This function sets promiscuous mode, all-multi mode or the multicast address
 * list to the device.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2];
	int bit_num;

	if (dev->flags & IFF_PROMISC) {
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		writel(PROM, lp->mmio + CMD2);
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);
}
static void amd8111e_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;
	strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version),
		 "%u", chip_version);
	strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}
static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_read_regs(lp, buf);
}
static int amd8111e_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	spin_lock_irq(&lp->lock);
	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}
static int amd8111e_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int res;
	spin_lock_irq(&lp->lock);
	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return res;
}
static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_nway_restart(&lp->mii_if);
}
static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_link_ok(&lp->mii_if);
}
static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
	if (lp->options & OPTION_WOL_ENABLE)
		wol_info->wolopts = WAKE_MAGIC;
}
static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	if (wol_info->wolopts & WAKE_MAGIC)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
	else if (wol_info->wolopts & WAKE_PHY)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
	else
		lp->options &= ~OPTION_WOL_ENABLE;
	spin_unlock_irq(&lp->lock);
	return 0;
}
static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
	.get_link_ksettings = amd8111e_get_link_ksettings,
	.set_link_ksettings = amd8111e_set_link_ksettings,
};
/* This function handles all the ethtool ioctls. It gives driver info,
 * gets/sets driver speed, gets memory mapped register values, forces
 * auto negotiation, sets/gets WOL options for ethtool application.
 */
static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;
	u32 mii_regval;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = lp->ext_phy_addr;

		fallthrough;
	case SIOCGMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}
static int amd8111e_set_mac_address(struct net_device *dev, void *p)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;
	struct sockaddr *addr = p;

	eth_hw_addr_set(dev, addr->sa_data);
	spin_lock_irq(&lp->lock);
	/* Setting the MAC address to the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], lp->mmio + PADR + i);

	spin_unlock_irq(&lp->lock);

	return 0;
}
/* This function changes the mtu of the device. It restarts the device to
 * initialize the descriptor with new receive buffers.
 */
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev)) {
		/* new_mtu will be used
		 * when device starts next time
		 */
		WRITE_ONCE(dev->mtu, new_mtu);
		return 0;
	}

	spin_lock_irq(&lp->lock);

	/* stop the chip */
	writel(RUN, lp->mmio + CMD0);

	WRITE_ONCE(dev->mtu, new_mtu);

	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_start_queue(dev);
	return err;
}
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
	/* Adapter is already stopped/suspended/interrupt-disabled */
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
/* This function is called when a packet transmission fails to complete
 * within a reasonable period, on the assumption that an interrupt has
 * failed or the interface is locked up. This function will reinitialize
 * the hardware.
 */
static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&lp->lock);
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_wake_queue(dev);
}
static int __maybe_unused amd8111e_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		/* enable wol */
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		device_set_wakeup_enable(dev_d, 1);

	} else {
		device_set_wakeup_enable(dev_d, 0);
	}

	return 0;
}
static int __maybe_unused amd8111e_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
			  jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
static void amd8111e_config_ipg(struct timer_list *t)
{
	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if (lp->link_config.duplex == DUPLEX_FULL) {
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {

		if (ipg_data->timer_tick == IPG_STABLE_TIME) {

			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		} else
			ipg_data->timer_tick++;
	}

	if (ipg_data->ipg_state == CSTATE) {

		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt =
				amd8111e_read_mib(mmio, xmt_collisions);

		if ((total_col_cnt - prev_col_cnt) <
				(ipg_data->diff_col_cnt)) {

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else {
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
}
static void amd8111e_probe_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	for (i = 0x1e; i >= 0; i--) {
		u32 id1, id2;

		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
			continue;
		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
			continue;
		lp->ext_phy_id = (id1 << 16) | id2;
		lp->ext_phy_addr = i;
		return;
	}
	lp->ext_phy_id = 0;
	lp->ext_phy_addr = 1;
}
static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_eth_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = amd8111e_poll,
#endif
};
static int amd8111e_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err, i;
	unsigned long reg_addr, reg_len;
	struct amd8111e_priv *lp;
	struct net_device *dev;
	u8 addr[ETH_ALEN];

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find PCI base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	if (!pdev->pm_cap) {
		dev_err(&pdev->dev, "No Power Management capability\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	/* Initialize DMA */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
		dev_err(&pdev->dev, "DMA not supported\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	reg_addr = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_free_reg;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif

	lp = netdev_priv(dev);
	lp->pci_dev = pdev;
	lp->amd8111e_net_dev = dev;

	spin_lock_init(&lp->lock);

	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
	if (!lp->mmio) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Initializing MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = readb(lp->mmio + PADR + i);
	eth_hw_addr_set(dev, addr);

	/* Setting user defined parameters */
	lp->ext_phy_option = speed_duplex[card_idx];
	if (coalesce[card_idx])
		lp->options |= OPTION_INTR_COAL_ENABLE;
	if (dynamic_ipg[card_idx++])
		lp->options |= OPTION_DYN_IPG_ENABLE;


	/* Initialize driver entry points */
	dev->netdev_ops = &amd8111e_netdev_ops;
	dev->ethtool_ops = &ops;
	dev->irq = pdev->irq;
	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
	dev->min_mtu = AMD8111E_MIN_MTU;
	dev->max_mtu = AMD8111E_MAX_MTU;
	netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);

	/* Probe the external PHY */
	amd8111e_probe_ext_phy(dev);

	/* setting mii default values */
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = amd8111e_mdio_read;
	lp->mii_if.mdio_write = amd8111e_mdio_write;
	lp->mii_if.phy_id = lp->ext_phy_addr;

	/* Set receive buffer length and set jumbo option*/
	amd8111e_set_rx_buff_len(dev);


	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_free_dev;
	}

	pci_set_drvdata(pdev, dev);

	/* Initialize software ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
		lp->ipg_data.ipg_timer.expires = jiffies +
						 IPG_CONVERGE_JIFFIES;
		lp->ipg_data.ipg = DEFAULT_IPG;
		lp->ipg_data.ipg_state = CSTATE;
	}

	/* display driver and device information */
	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
		 chip_version, dev->dev_addr);
	if (lp->ext_phy_id)
		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
			 lp->ext_phy_id, lp->ext_phy_addr);
	else
		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");

	return 0;

err_free_dev:
	free_netdev(dev);

err_free_reg:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
static void amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
static const struct pci_device_id amd8111e_pci_tbl[] = {
	{
	 .vendor = PCI_VENDOR_ID_AMD,
	 .device = PCI_DEVICE_ID_AMD8111E_7462,
	},
	{
	 .vendor = 0,
	}
};
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);

static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= amd8111e_remove_one,
	.driver.pm	= &amd8111e_pm_ops
};

module_pci_driver(amd8111e_driver);