2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
/* Driver identification: version string reported via MODULE_VERSION below,
 * module name, and the prefix prepended to every printk by dprintk(). */
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
/* Per-chip firmware blob paths (relative to the firmware search path).
 * Each is also declared via MODULE_FIRMWARE() below and referenced from
 * the chip-info table so it can be loaded at runtime for that MAC version. */
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
/* Debug helpers, gated on RTL8169_DEBUG (see #endif below): assert() logs a
 * failure location, dprintk() emits a KERN_DEBUG message with the PFX prefix.
 * NOTE(review): lines of the assert() macro body were lost in extraction. */
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
/* Non-debug builds: both helpers compile to empty statements. */
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
/* Default netif message-level bitmap used for the msg_enable field. */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
/* Number of free Tx descriptors: ring size minus in-flight descriptors
 * (cur_tx - dirty_tx), rearranged to avoid intermediate negatives. */
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit
= 32;
/* PCI / DMA tuning knobs. */
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
81 #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
/* Ring geometry and derived byte sizes of the descriptor arrays. */
84 #define R8169_REGS_SIZE 256
85 #define R8169_NAPI_WEIGHT 64
86 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
89 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
/* Timeouts are expressed in jiffies (HZ == 1 second). */
92 #define RTL8169_TX_TIMEOUT (6*HZ)
93 #define RTL8169_PHY_TIMEOUT (10*HZ)
/* EEPROM signature, stored little-endian at address 0. */
95 #define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96 #define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97 #define RTL_EEPROM_SIG_ADDR 0x0000
99 /* write/read MMIO register */
/* These macros rely on a local `ioaddr` (void __iomem *) being in scope;
 * every user below declares `ioaddr = tp->mmio_addr` first. */
100 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
102 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
103 #define RTL_R8(reg) readb (ioaddr + (reg))
104 #define RTL_R16(reg) readw (ioaddr + (reg))
105 #define RTL_R32(reg) readl (ioaddr + (reg))
108 RTL_GIGA_MAC_VER_01
= 0,
149 RTL_GIGA_MAC_NONE
= 0xff,
152 enum rtl_tx_desc_version
{
157 #define JUMBO_1K ETH_DATA_LEN
158 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
163 #define _R(NAME,TD,FW,SZ,B) { \
171 static const struct {
173 enum rtl_tx_desc_version txd_version
;
177 } rtl_chip_infos
[] = {
179 [RTL_GIGA_MAC_VER_01
] =
180 _R("RTL8169", RTL_TD_0
, NULL
, JUMBO_7K
, true),
181 [RTL_GIGA_MAC_VER_02
] =
182 _R("RTL8169s", RTL_TD_0
, NULL
, JUMBO_7K
, true),
183 [RTL_GIGA_MAC_VER_03
] =
184 _R("RTL8110s", RTL_TD_0
, NULL
, JUMBO_7K
, true),
185 [RTL_GIGA_MAC_VER_04
] =
186 _R("RTL8169sb/8110sb", RTL_TD_0
, NULL
, JUMBO_7K
, true),
187 [RTL_GIGA_MAC_VER_05
] =
188 _R("RTL8169sc/8110sc", RTL_TD_0
, NULL
, JUMBO_7K
, true),
189 [RTL_GIGA_MAC_VER_06
] =
190 _R("RTL8169sc/8110sc", RTL_TD_0
, NULL
, JUMBO_7K
, true),
192 [RTL_GIGA_MAC_VER_07
] =
193 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
194 [RTL_GIGA_MAC_VER_08
] =
195 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
196 [RTL_GIGA_MAC_VER_09
] =
197 _R("RTL8102e", RTL_TD_1
, NULL
, JUMBO_1K
, true),
198 [RTL_GIGA_MAC_VER_10
] =
199 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
200 [RTL_GIGA_MAC_VER_11
] =
201 _R("RTL8168b/8111b", RTL_TD_0
, NULL
, JUMBO_4K
, false),
202 [RTL_GIGA_MAC_VER_12
] =
203 _R("RTL8168b/8111b", RTL_TD_0
, NULL
, JUMBO_4K
, false),
204 [RTL_GIGA_MAC_VER_13
] =
205 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
206 [RTL_GIGA_MAC_VER_14
] =
207 _R("RTL8100e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
208 [RTL_GIGA_MAC_VER_15
] =
209 _R("RTL8100e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
210 [RTL_GIGA_MAC_VER_16
] =
211 _R("RTL8101e", RTL_TD_0
, NULL
, JUMBO_1K
, true),
212 [RTL_GIGA_MAC_VER_17
] =
213 _R("RTL8168b/8111b", RTL_TD_1
, NULL
, JUMBO_4K
, false),
214 [RTL_GIGA_MAC_VER_18
] =
215 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
216 [RTL_GIGA_MAC_VER_19
] =
217 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
218 [RTL_GIGA_MAC_VER_20
] =
219 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
220 [RTL_GIGA_MAC_VER_21
] =
221 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
222 [RTL_GIGA_MAC_VER_22
] =
223 _R("RTL8168c/8111c", RTL_TD_1
, NULL
, JUMBO_6K
, false),
224 [RTL_GIGA_MAC_VER_23
] =
225 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
226 [RTL_GIGA_MAC_VER_24
] =
227 _R("RTL8168cp/8111cp", RTL_TD_1
, NULL
, JUMBO_6K
, false),
228 [RTL_GIGA_MAC_VER_25
] =
229 _R("RTL8168d/8111d", RTL_TD_1
, FIRMWARE_8168D_1
,
231 [RTL_GIGA_MAC_VER_26
] =
232 _R("RTL8168d/8111d", RTL_TD_1
, FIRMWARE_8168D_2
,
234 [RTL_GIGA_MAC_VER_27
] =
235 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
236 [RTL_GIGA_MAC_VER_28
] =
237 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
238 [RTL_GIGA_MAC_VER_29
] =
239 _R("RTL8105e", RTL_TD_1
, FIRMWARE_8105E_1
,
241 [RTL_GIGA_MAC_VER_30
] =
242 _R("RTL8105e", RTL_TD_1
, FIRMWARE_8105E_1
,
244 [RTL_GIGA_MAC_VER_31
] =
245 _R("RTL8168dp/8111dp", RTL_TD_1
, NULL
, JUMBO_9K
, false),
246 [RTL_GIGA_MAC_VER_32
] =
247 _R("RTL8168e/8111e", RTL_TD_1
, FIRMWARE_8168E_1
,
249 [RTL_GIGA_MAC_VER_33
] =
250 _R("RTL8168e/8111e", RTL_TD_1
, FIRMWARE_8168E_2
,
252 [RTL_GIGA_MAC_VER_34
] =
253 _R("RTL8168evl/8111evl",RTL_TD_1
, FIRMWARE_8168E_3
,
255 [RTL_GIGA_MAC_VER_35
] =
256 _R("RTL8168f/8111f", RTL_TD_1
, FIRMWARE_8168F_1
,
258 [RTL_GIGA_MAC_VER_36
] =
259 _R("RTL8168f/8111f", RTL_TD_1
, FIRMWARE_8168F_2
,
261 [RTL_GIGA_MAC_VER_37
] =
262 _R("RTL8402", RTL_TD_1
, FIRMWARE_8402_1
,
264 [RTL_GIGA_MAC_VER_38
] =
265 _R("RTL8411", RTL_TD_1
, FIRMWARE_8411_1
,
267 [RTL_GIGA_MAC_VER_39
] =
268 _R("RTL8106e", RTL_TD_1
, FIRMWARE_8106E_1
,
270 [RTL_GIGA_MAC_VER_40
] =
271 _R("RTL8168g/8111g", RTL_TD_1
, FIRMWARE_8168G_1
,
273 [RTL_GIGA_MAC_VER_41
] =
274 _R("RTL8168g/8111g", RTL_TD_1
, NULL
, JUMBO_9K
, false),
/* PCI IDs this driver binds to; the last field selects an RTL_CFG_* variant.
 * NOTE(review): the table's trailing entries/closing brace and the vendor
 * half of the 0x2410 entry were lost in extraction — compare with upstream. */
284 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl
) = {
285 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8129), 0, 0, RTL_CFG_0
},
286 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8136), 0, 0, RTL_CFG_2
},
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8167), 0, 0, RTL_CFG_0
},
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8168), 0, 0, RTL_CFG_1
},
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK
, 0x8169), 0, 0, RTL_CFG_0
},
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, 0x4300), 0, 0, RTL_CFG_0
},
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, 0x4302), 0, 0, RTL_CFG_0
},
292 { PCI_DEVICE(PCI_VENDOR_ID_AT
, 0xc107), 0, 0, RTL_CFG_0
},
293 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0
},
294 { PCI_VENDOR_ID_LINKSYS
, 0x1032,
295 PCI_ANY_ID
, 0x0024, 0, 0, RTL_CFG_0
},
297 PCI_ANY_ID
, 0x2410, 0, 0, RTL_CFG_2
},
301 MODULE_DEVICE_TABLE(pci
, rtl8169_pci_tbl
);
/* Rx buffer size handed to the hardware; 16383 == 2^14 - 1.
 * NOTE(review): deliberately non-const — presumably mirrors the RxMaxSize
 * register's maximum; confirm against the register setup code. */
303 static int rx_buf_sz
= 16383;
310 MAC0
= 0, /* Ethernet hardware address. */
312 MAR0
= 8, /* Multicast filter. */
313 CounterAddrLow
= 0x10,
314 CounterAddrHigh
= 0x14,
315 TxDescStartAddrLow
= 0x20,
316 TxDescStartAddrHigh
= 0x24,
317 TxHDescStartAddrLow
= 0x28,
318 TxHDescStartAddrHigh
= 0x2c,
327 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
328 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
331 #define RX128_INT_EN (1 << 15) /* 8111c and later */
332 #define RX_MULTI_EN (1 << 14) /* 8111c only */
333 #define RXCFG_FIFO_SHIFT 13
334 /* No threshold before first PCI xfer */
335 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
336 #define RXCFG_DMA_SHIFT 8
337 /* Unlimited maximum PCI burst. */
338 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
345 #define PME_SIGNAL (1 << 5) /* 8168c and later */
356 RxDescAddrLow
= 0xe4,
357 RxDescAddrHigh
= 0xe8,
358 EarlyTxThres
= 0xec, /* 8169. Unit of 32 bytes. */
360 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
362 MaxTxPacketSize
= 0xec, /* 8101/8168. Unit of 128 bytes. */
364 #define TxPacketMax (8064 >> 7)
365 #define EarlySize 0x27
368 FuncEventMask
= 0xf4,
369 FuncPresetState
= 0xf8,
370 FuncForceEvent
= 0xfc,
373 enum rtl8110_registers
{
379 enum rtl8168_8101_registers
{
382 #define CSIAR_FLAG 0x80000000
383 #define CSIAR_WRITE_CMD 0x80000000
384 #define CSIAR_BYTE_ENABLE 0x0f
385 #define CSIAR_BYTE_ENABLE_SHIFT 12
386 #define CSIAR_ADDR_MASK 0x0fff
387 #define CSIAR_FUNC_CARD 0x00000000
388 #define CSIAR_FUNC_SDIO 0x00010000
389 #define CSIAR_FUNC_NIC 0x00020000
392 #define EPHYAR_FLAG 0x80000000
393 #define EPHYAR_WRITE_CMD 0x80000000
394 #define EPHYAR_REG_MASK 0x1f
395 #define EPHYAR_REG_SHIFT 16
396 #define EPHYAR_DATA_MASK 0xffff
398 #define PFM_EN (1 << 6)
400 #define FIX_NAK_1 (1 << 4)
401 #define FIX_NAK_2 (1 << 3)
404 #define NOW_IS_OOB (1 << 7)
405 #define TX_EMPTY (1 << 5)
406 #define RX_EMPTY (1 << 4)
407 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
408 #define EN_NDP (1 << 3)
409 #define EN_OOB_RESET (1 << 2)
410 #define LINK_LIST_RDY (1 << 1)
412 #define EFUSEAR_FLAG 0x80000000
413 #define EFUSEAR_WRITE_CMD 0x80000000
414 #define EFUSEAR_READ_CMD 0x00000000
415 #define EFUSEAR_REG_MASK 0x03ff
416 #define EFUSEAR_REG_SHIFT 8
417 #define EFUSEAR_DATA_MASK 0xff
420 enum rtl8168_registers
{
425 #define ERIAR_FLAG 0x80000000
426 #define ERIAR_WRITE_CMD 0x80000000
427 #define ERIAR_READ_CMD 0x00000000
428 #define ERIAR_ADDR_BYTE_ALIGN 4
429 #define ERIAR_TYPE_SHIFT 16
430 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
431 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
432 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
433 #define ERIAR_MASK_SHIFT 12
434 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
435 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
436 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
437 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
438 EPHY_RXER_NUM
= 0x7c,
439 OCPDR
= 0xb0, /* OCP GPHY access */
440 #define OCPDR_WRITE_CMD 0x80000000
441 #define OCPDR_READ_CMD 0x00000000
442 #define OCPDR_REG_MASK 0x7f
443 #define OCPDR_GPHY_REG_SHIFT 16
444 #define OCPDR_DATA_MASK 0xffff
446 #define OCPAR_FLAG 0x80000000
447 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
448 #define OCPAR_GPHY_READ_CMD 0x0000f060
450 RDSAR1
= 0xd0, /* 8168c only. Undocumented on 8168dp */
451 MISC
= 0xf0, /* 8168e only. */
452 #define TXPLA_RST (1 << 29)
453 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
454 #define PWM_EN (1 << 22)
455 #define RXDV_GATED_EN (1 << 19)
456 #define EARLY_TALLY_EN (1 << 16)
459 enum rtl_register_content
{
460 /* InterruptStatusBits */
464 TxDescUnavail
= 0x0080,
488 /* TXPoll register p.5 */
489 HPQ
= 0x80, /* Poll cmd on the high prio queue */
490 NPQ
= 0x40, /* Poll cmd on the low prio queue */
491 FSWInt
= 0x01, /* Forced software interrupt */
495 Cfg9346_Unlock
= 0xc0,
500 AcceptBroadcast
= 0x08,
501 AcceptMulticast
= 0x04,
503 AcceptAllPhys
= 0x01,
504 #define RX_CONFIG_ACCEPT_MASK 0x3f
507 TxInterFrameGapShift
= 24,
508 TxDMAShift
= 8, /* DMA burst value (0-7) is shift this many bits */
510 /* Config1 register p.24 */
513 Speed_down
= (1 << 4),
517 PMEnable
= (1 << 0), /* Power Management Enable */
519 /* Config2 register p. 25 */
520 MSIEnable
= (1 << 5), /* 8169 only. Reserved in the 8168. */
521 PCI_Clock_66MHz
= 0x01,
522 PCI_Clock_33MHz
= 0x00,
524 /* Config3 register p.25 */
525 MagicPacket
= (1 << 5), /* Wake up when receives a Magic Packet */
526 LinkUp
= (1 << 4), /* Wake up when the cable connection is re-established */
527 Jumbo_En0
= (1 << 2), /* 8168 only. Reserved in the 8168b */
528 Beacon_en
= (1 << 0), /* 8168 only. Reserved in the 8168b */
530 /* Config4 register */
531 Jumbo_En1
= (1 << 1), /* 8168 only. Reserved in the 8168b */
533 /* Config5 register p.27 */
534 BWF
= (1 << 6), /* Accept Broadcast wakeup frame */
535 MWF
= (1 << 5), /* Accept Multicast wakeup frame */
536 UWF
= (1 << 4), /* Accept Unicast wakeup frame */
538 LanWake
= (1 << 1), /* LanWake enable/disable */
539 PMEStatus
= (1 << 0), /* PME status can be reset by PCI RST# */
542 TBIReset
= 0x80000000,
543 TBILoopback
= 0x40000000,
544 TBINwEnable
= 0x20000000,
545 TBINwRestart
= 0x10000000,
546 TBILinkOk
= 0x02000000,
547 TBINwComplete
= 0x01000000,
550 EnableBist
= (1 << 15), // 8168 8101
551 Mac_dbgo_oe
= (1 << 14), // 8168 8101
552 Normal_mode
= (1 << 13), // unused
553 Force_half_dup
= (1 << 12), // 8168 8101
554 Force_rxflow_en
= (1 << 11), // 8168 8101
555 Force_txflow_en
= (1 << 10), // 8168 8101
556 Cxpl_dbg_sel
= (1 << 9), // 8168 8101
557 ASF
= (1 << 8), // 8168 8101
558 PktCntrDisable
= (1 << 7), // 8168 8101
559 Mac_dbgo_sel
= 0x001c, // 8168
564 INTT_0
= 0x0000, // 8168
565 INTT_1
= 0x0001, // 8168
566 INTT_2
= 0x0002, // 8168
567 INTT_3
= 0x0003, // 8168
569 /* rtl8169_PHYstatus */
580 TBILinkOK
= 0x02000000,
582 /* DumpCounterCommand */
587 /* First doubleword. */
588 DescOwn
= (1 << 31), /* Descriptor is owned by NIC */
589 RingEnd
= (1 << 30), /* End of descriptor ring */
590 FirstFrag
= (1 << 29), /* First segment of a packet */
591 LastFrag
= (1 << 28), /* Final segment of a packet */
595 enum rtl_tx_desc_bit
{
596 /* First doubleword. */
597 TD_LSO
= (1 << 27), /* Large Send Offload */
598 #define TD_MSS_MAX 0x07ffu /* MSS value */
600 /* Second doubleword. */
601 TxVlanTag
= (1 << 17), /* Add VLAN tag */
604 /* 8169, 8168b and 810x except 8102e. */
605 enum rtl_tx_desc_bit_0
{
606 /* First doubleword. */
607 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
608 TD0_TCP_CS
= (1 << 16), /* Calculate TCP/IP checksum */
609 TD0_UDP_CS
= (1 << 17), /* Calculate UDP/IP checksum */
610 TD0_IP_CS
= (1 << 18), /* Calculate IP checksum */
613 /* 8102e, 8168c and beyond. */
614 enum rtl_tx_desc_bit_1
{
615 /* Second doubleword. */
616 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
617 TD1_IP_CS
= (1 << 29), /* Calculate IP checksum */
618 TD1_TCP_CS
= (1 << 30), /* Calculate TCP/IP checksum */
619 TD1_UDP_CS
= (1 << 31), /* Calculate UDP/IP checksum */
622 static const struct rtl_tx_desc_info
{
629 } tx_desc_info
[] = {
632 .udp
= TD0_IP_CS
| TD0_UDP_CS
,
633 .tcp
= TD0_IP_CS
| TD0_TCP_CS
635 .mss_shift
= TD0_MSS_SHIFT
,
640 .udp
= TD1_IP_CS
| TD1_UDP_CS
,
641 .tcp
= TD1_IP_CS
| TD1_TCP_CS
643 .mss_shift
= TD1_MSS_SHIFT
,
648 enum rtl_rx_desc_bit
{
650 PID1
= (1 << 18), /* Protocol ID bit 1/2 */
651 PID0
= (1 << 17), /* Protocol ID bit 2/2 */
653 #define RxProtoUDP (PID1)
654 #define RxProtoTCP (PID0)
655 #define RxProtoIP (PID1 | PID0)
656 #define RxProtoMask RxProtoIP
658 IPFail
= (1 << 16), /* IP checksum failed */
659 UDPFail
= (1 << 15), /* UDP/IP checksum failed */
660 TCPFail
= (1 << 14), /* TCP/IP checksum failed */
661 RxVlanTag
= (1 << 16), /* VLAN tag available */
664 #define RsvdMask 0x3fffc000
681 u8 __pad
[sizeof(void *) - sizeof(u32
)];
685 RTL_FEATURE_WOL
= (1 << 0),
686 RTL_FEATURE_MSI
= (1 << 1),
687 RTL_FEATURE_GMII
= (1 << 2),
690 struct rtl8169_counters
{
697 __le32 tx_one_collision
;
698 __le32 tx_multi_collision
;
707 RTL_FLAG_TASK_ENABLED
,
708 RTL_FLAG_TASK_SLOW_PENDING
,
709 RTL_FLAG_TASK_RESET_PENDING
,
710 RTL_FLAG_TASK_PHY_PENDING
,
714 struct rtl8169_stats
{
717 struct u64_stats_sync syncp
;
720 struct rtl8169_private
{
721 void __iomem
*mmio_addr
; /* memory map physical address */
722 struct pci_dev
*pci_dev
;
723 struct net_device
*dev
;
724 struct napi_struct napi
;
728 u32 cur_rx
; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx
; /* Index into the Tx descriptor buffer of next Rx pkt. */
732 struct rtl8169_stats rx_stats
;
733 struct rtl8169_stats tx_stats
;
734 struct TxDesc
*TxDescArray
; /* 256-aligned Tx descriptor ring */
735 struct RxDesc
*RxDescArray
; /* 256-aligned Rx descriptor ring */
736 dma_addr_t TxPhyAddr
;
737 dma_addr_t RxPhyAddr
;
738 void *Rx_databuff
[NUM_RX_DESC
]; /* Rx data buffers */
739 struct ring_info tx_skb
[NUM_TX_DESC
]; /* Tx data buffers */
740 struct timer_list timer
;
746 void (*write
)(struct rtl8169_private
*, int, int);
747 int (*read
)(struct rtl8169_private
*, int);
750 struct pll_power_ops
{
751 void (*down
)(struct rtl8169_private
*);
752 void (*up
)(struct rtl8169_private
*);
756 void (*enable
)(struct rtl8169_private
*);
757 void (*disable
)(struct rtl8169_private
*);
761 void (*write
)(struct rtl8169_private
*, int, int);
762 u32 (*read
)(struct rtl8169_private
*, int);
765 int (*set_speed
)(struct net_device
*, u8 aneg
, u16 sp
, u8 dpx
, u32 adv
);
766 int (*get_settings
)(struct net_device
*, struct ethtool_cmd
*);
767 void (*phy_reset_enable
)(struct rtl8169_private
*tp
);
768 void (*hw_start
)(struct net_device
*);
769 unsigned int (*phy_reset_pending
)(struct rtl8169_private
*tp
);
770 unsigned int (*link_ok
)(void __iomem
*);
771 int (*do_ioctl
)(struct rtl8169_private
*tp
, struct mii_ioctl_data
*data
, int cmd
);
774 DECLARE_BITMAP(flags
, RTL_FLAG_MAX
);
776 struct work_struct work
;
781 struct mii_if_info mii
;
782 struct rtl8169_counters counters
;
787 const struct firmware
*fw
;
789 #define RTL_VER_SIZE 32
791 char version
[RTL_VER_SIZE
];
793 struct rtl_fw_phy_action
{
798 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
/* Module metadata and parameters. `use_dac` enables 64-bit PCI DAC
 * addressing; `debug` maps onto debug.msg_enable (netif message level). */
803 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
804 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
805 module_param(use_dac
, int, 0);
806 MODULE_PARM_DESC(use_dac
, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
807 module_param_named(debug
, debug
.msg_enable
, int, 0);
808 MODULE_PARM_DESC(debug
, "Debug verbosity level (0=none, ..., 16=all)");
809 MODULE_LICENSE("GPL");
810 MODULE_VERSION(RTL8169_VERSION
);
/* Declare every loadable firmware image (see FIRMWARE_* macros above). */
811 MODULE_FIRMWARE(FIRMWARE_8168D_1
);
812 MODULE_FIRMWARE(FIRMWARE_8168D_2
);
813 MODULE_FIRMWARE(FIRMWARE_8168E_1
);
814 MODULE_FIRMWARE(FIRMWARE_8168E_2
);
815 MODULE_FIRMWARE(FIRMWARE_8168E_3
);
816 MODULE_FIRMWARE(FIRMWARE_8105E_1
);
817 MODULE_FIRMWARE(FIRMWARE_8168F_1
);
818 MODULE_FIRMWARE(FIRMWARE_8168F_2
);
819 MODULE_FIRMWARE(FIRMWARE_8402_1
);
820 MODULE_FIRMWARE(FIRMWARE_8411_1
);
821 MODULE_FIRMWARE(FIRMWARE_8106E_1
);
822 MODULE_FIRMWARE(FIRMWARE_8168G_1
);
/* Take the per-device work mutex (tp->wk.mutex); pairs with
 * rtl_unlock_work() below to serialize deferred-work state. */
824 static void rtl_lock_work(struct rtl8169_private
*tp
)
826 mutex_lock(&tp
->wk
.mutex
);
/* Release the per-device work mutex taken by rtl_lock_work(). */
829 static void rtl_unlock_work(struct rtl8169_private
*tp
)
831 mutex_unlock(&tp
->wk
.mutex
);
/* Tune the PCIe Max_Read_Request_Size: read-modify-write PCI_EXP_DEVCTL,
 * clearing the READRQ field and OR-ing in the caller-supplied value. */
834 static void rtl_tx_performance_tweak(struct pci_dev
*pdev
, u16 force
)
836 int cap
= pci_pcie_cap(pdev
);
841 pci_read_config_word(pdev
, cap
+ PCI_EXP_DEVCTL
, &ctl
);
842 ctl
= (ctl
& ~PCI_EXP_DEVCTL_READRQ
) | force
;
843 pci_write_config_word(pdev
, cap
+ PCI_EXP_DEVCTL
, ctl
);
/* rtl_cond member: the predicate polled by rtl_loop_wait(). */
848 bool (*check
)(struct rtl8169_private
*);
852 static void rtl_udelay(unsigned int d
)
/* Poll c->check(tp) up to n times, waiting delay(d) between attempts,
 * until it matches `high`; on exhaustion log the condition's message
 * via netif_err (shows expected level, loop count and delay). */
857 static bool rtl_loop_wait(struct rtl8169_private
*tp
, const struct rtl_cond
*c
,
858 void (*delay
)(unsigned int), unsigned int d
, int n
,
863 for (i
= 0; i
< n
; i
++) {
865 if (c
->check(tp
) == high
)
868 netif_err(tp
, drv
, tp
->dev
, "%s == %d (loop: %d, delay: %d).\n",
869 c
->msg
, !high
, n
, d
);
/* Convenience wrappers: busy-wait (udelay) vs sleeping (msleep) polls,
 * waiting for the condition to become true (high) or false (low). */
873 static bool rtl_udelay_loop_wait_high(struct rtl8169_private
*tp
,
874 const struct rtl_cond
*c
,
875 unsigned int d
, int n
)
877 return rtl_loop_wait(tp
, c
, rtl_udelay
, d
, n
, true);
880 static bool rtl_udelay_loop_wait_low(struct rtl8169_private
*tp
,
881 const struct rtl_cond
*c
,
882 unsigned int d
, int n
)
884 return rtl_loop_wait(tp
, c
, rtl_udelay
, d
, n
, false);
887 static bool rtl_msleep_loop_wait_high(struct rtl8169_private
*tp
,
888 const struct rtl_cond
*c
,
889 unsigned int d
, int n
)
891 return rtl_loop_wait(tp
, c
, msleep
, d
, n
, true);
894 static bool rtl_msleep_loop_wait_low(struct rtl8169_private
*tp
,
895 const struct rtl_cond
*c
,
896 unsigned int d
, int n
)
898 return rtl_loop_wait(tp
, c
, msleep
, d
, n
, false);
/* Declare a named rtl_cond: forward-declares name##_check, defines the
 * const rtl_cond instance pointing at it, then opens the check function
 * definition (the user supplies the body right after the macro). */
901 #define DECLARE_RTL_COND(name) \
902 static bool name ## _check(struct rtl8169_private *); \
904 static const struct rtl_cond name = { \
905 .check = name ## _check, \
909 static bool name ## _check(struct rtl8169_private *tp)
/* Condition: OCPAR busy flag still set. */
911 DECLARE_RTL_COND(rtl_ocpar_cond
)
913 void __iomem
*ioaddr
= tp
->mmio_addr
;
915 return RTL_R32(OCPAR
) & OCPAR_FLAG
;
/* OCP register read: program OCPAR with byte-enable mask (bits 15:12)
 * and 12-bit address, then poll rtl_ocpar_cond for completion. */
918 static u32
ocp_read(struct rtl8169_private
*tp
, u8 mask
, u16 reg
)
920 void __iomem
*ioaddr
= tp
->mmio_addr
;
922 RTL_W32(OCPAR
, ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
924 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 100, 20) ?
/* OCP register write: latch data in OCPDR, then trigger via OCPAR with
 * OCPAR_FLAG set and wait for the busy flag to clear. */
928 static void ocp_write(struct rtl8169_private
*tp
, u8 mask
, u16 reg
, u32 data
)
930 void __iomem
*ioaddr
= tp
->mmio_addr
;
932 RTL_W32(OCPDR
, data
);
933 RTL_W32(OCPAR
, OCPAR_FLAG
| ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
935 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 100, 20);
/* Condition: ERIAR busy flag still set. */
938 DECLARE_RTL_COND(rtl_eriar_cond
)
940 void __iomem
*ioaddr
= tp
->mmio_addr
;
942 return RTL_R32(ERIAR
) & ERIAR_FLAG
;
/* Send an out-of-band command to the embedded management firmware:
 * write via ERIAR (addr 0xe8), wait for completion, then kick the
 * firmware doorbell through ocp_write(). */
945 static void rtl8168_oob_notify(struct rtl8169_private
*tp
, u8 cmd
)
947 void __iomem
*ioaddr
= tp
->mmio_addr
;
950 RTL_W32(ERIAR
, 0x800010e8);
953 if (!rtl_udelay_loop_wait_low(tp
, &rtl_eriar_cond
, 100, 5))
956 ocp_write(tp
, 0x1, 0x30, 0x00000001);
/* Out-of-band command codes passed to rtl8168_oob_notify(). */
959 #define OOB_CMD_RESET 0x00
960 #define OOB_CMD_DRIVER_START 0x05
961 #define OOB_CMD_DRIVER_STOP 0x06
/* The OCP status register lives at 0xb8 on VER_31, 0x10 otherwise. */
963 static u16
rtl8168_get_ocp_reg(struct rtl8169_private
*tp
)
965 return (tp
->mac_version
== RTL_GIGA_MAC_VER_31
) ? 0xb8 : 0x10;
/* Condition: firmware-ack bit 11 set in the OCP status register. */
968 DECLARE_RTL_COND(rtl_ocp_read_cond
)
972 reg
= rtl8168_get_ocp_reg(tp
);
974 return ocp_read(tp
, 0x0f, reg
) & 0x00000800;
/* Tell the management firmware the driver is starting, then wait
 * (sleeping, up to 10 x 10ms) for its acknowledgement bit. */
977 static void rtl8168_driver_start(struct rtl8169_private
*tp
)
979 rtl8168_oob_notify(tp
, OOB_CMD_DRIVER_START
);
981 rtl_msleep_loop_wait_high(tp
, &rtl_ocp_read_cond
, 10, 10);
/* Symmetric stop notification; waits for the ack bit to clear. */
984 static void rtl8168_driver_stop(struct rtl8169_private
*tp
)
986 rtl8168_oob_notify(tp
, OOB_CMD_DRIVER_STOP
);
988 rtl_msleep_loop_wait_low(tp
, &rtl_ocp_read_cond
, 10, 10);
/* Returns 1 if the DASH management capability bit (15) is set. */
991 static int r8168dp_check_dash(struct rtl8169_private
*tp
)
993 u16 reg
= rtl8168_get_ocp_reg(tp
);
995 return (ocp_read(tp
, 0x0f, reg
) & 0x00008000) ? 1 : 0;
/* Validate an OCP register address: must be even and fit in 16 bits;
 * logs and flags an error otherwise. */
998 static bool rtl_ocp_reg_failure(struct rtl8169_private
*tp
, u32 reg
)
1000 if (reg
& 0xffff0001) {
1001 netif_err(tp
, drv
, tp
->dev
, "Invalid ocp reg %x!\n", reg
);
/* Condition: GPHY_OCP busy flag still set. */
1007 DECLARE_RTL_COND(rtl_ocp_gphy_cond
)
1009 void __iomem
*ioaddr
= tp
->mmio_addr
;
1011 return RTL_R32(GPHY_OCP
) & OCPAR_FLAG
;
*tp
, u32 reg
, u32 data
)
1016 void __iomem
*ioaddr
= tp
->mmio_addr
;
1018 if (rtl_ocp_reg_failure(tp
, reg
))
1021 RTL_W32(GPHY_OCP
, OCPAR_FLAG
| (reg
<< 15) | data
);
1023 rtl_udelay_loop_wait_low(tp
, &rtl_ocp_gphy_cond
, 25, 10);
1026 static u16
r8168_phy_ocp_read(struct rtl8169_private
*tp
, u32 reg
)
1028 void __iomem
*ioaddr
= tp
->mmio_addr
;
1030 if (rtl_ocp_reg_failure(tp
, reg
))
1033 RTL_W32(GPHY_OCP
, reg
<< 15);
1035 return rtl_udelay_loop_wait_high(tp
, &rtl_ocp_gphy_cond
, 25, 10) ?
1036 (RTL_R32(GPHY_OCP
) & 0xffff) : ~0;
1039 static void rtl_w1w0_phy_ocp(struct rtl8169_private
*tp
, int reg
, int p
, int m
)
1043 val
= r8168_phy_ocp_read(tp
, reg
);
1044 r8168_phy_ocp_write(tp
, reg
, (val
| p
) & ~m
);
1047 static void r8168_mac_ocp_write(struct rtl8169_private
*tp
, u32 reg
, u32 data
)
1049 void __iomem
*ioaddr
= tp
->mmio_addr
;
1051 if (rtl_ocp_reg_failure(tp
, reg
))
1054 RTL_W32(OCPDR
, OCPAR_FLAG
| (reg
<< 15) | data
);
1057 static u16
r8168_mac_ocp_read(struct rtl8169_private
*tp
, u32 reg
)
1059 void __iomem
*ioaddr
= tp
->mmio_addr
;
1061 if (rtl_ocp_reg_failure(tp
, reg
))
1064 RTL_W32(OCPDR
, reg
<< 15);
1066 return RTL_R32(OCPDR
);
1069 #define OCP_STD_PHY_BASE 0xa400
/* 8168g-style MDIO write: a write to register 0x1f (page select,
 * visible in the ocp_base update) switches the OCP page; other
 * registers map to ocp_base + 2*reg within the current page. */
1071 static void r8168g_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1074 tp
->ocp_base
= value
? value
<< 4 : OCP_STD_PHY_BASE
;
1078 if (tp
->ocp_base
!= OCP_STD_PHY_BASE
)
1081 r8168_phy_ocp_write(tp
, tp
->ocp_base
+ reg
* 2, value
);
/* 8168g-style MDIO read: same ocp_base + 2*reg mapping as the write path. */
1084 static int r8168g_mdio_read(struct rtl8169_private
*tp
, int reg
)
1086 if (tp
->ocp_base
!= OCP_STD_PHY_BASE
)
1089 return r8168_phy_ocp_read(tp
, tp
->ocp_base
+ reg
* 2);
/* Condition: PHYAR busy flag (bit 31) still set. */
1092 DECLARE_RTL_COND(rtl_phyar_cond
)
1094 void __iomem
*ioaddr
= tp
->mmio_addr
;
1096 return RTL_R32(PHYAR
) & 0x80000000;
/* Classic 8169 MDIO write: bit 31 = write command, reg in bits 20:16,
 * data in the low 16 bits; poll until the busy flag clears. */
1099 static void r8169_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1101 void __iomem
*ioaddr
= tp
->mmio_addr
;
1103 RTL_W32(PHYAR
, 0x80000000 | (reg
& 0x1f) << 16 | (value
& 0xffff));
1105 rtl_udelay_loop_wait_low(tp
, &rtl_phyar_cond
, 25, 20);
1107 * According to hardware specs a 20us delay is required after write
1108 * complete indication, but before sending next command.
/* Classic 8169 MDIO read; ~0 on timeout. */
1113 static int r8169_mdio_read(struct rtl8169_private
*tp
, int reg
)
1115 void __iomem
*ioaddr
= tp
->mmio_addr
;
1118 RTL_W32(PHYAR
, 0x0 | (reg
& 0x1f) << 16);
1120 value
= rtl_udelay_loop_wait_high(tp
, &rtl_phyar_cond
, 25, 20) ?
1121 RTL_R32(PHYAR
) & 0xffff : ~0;
1124 * According to hardware specs a 20us delay is required after read
1125 * complete indication, but before sending next command.
/* 8168dp (v1) MDIO access: data+reg go through OCPDR, the transfer is
 * kicked via OCPAR with the GPHY write command, and EPHY_RXER_NUM is
 * cleared before polling rtl_ocpar_cond for completion. */
1132 static void r8168dp_1_mdio_access(struct rtl8169_private
*tp
, int reg
, u32 data
)
1134 void __iomem
*ioaddr
= tp
->mmio_addr
;
1136 RTL_W32(OCPDR
, data
| ((reg
& OCPDR_REG_MASK
) << OCPDR_GPHY_REG_SHIFT
));
1137 RTL_W32(OCPAR
, OCPAR_GPHY_WRITE_CMD
);
1138 RTL_W32(EPHY_RXER_NUM
, 0);
1140 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 1000, 100);
/* MDIO write via the access helper: write command + 16-bit data. */
1143 static void r8168dp_1_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1145 r8168dp_1_mdio_access(tp
, reg
,
1146 OCPDR_WRITE_CMD
| (value
& OCPDR_DATA_MASK
));
/* MDIO read: issue the read command, then a second OCPAR kick to fetch
 * the result out of OCPDR; ~0 on timeout. */
1149 static int r8168dp_1_mdio_read(struct rtl8169_private
*tp
, int reg
)
1151 void __iomem
*ioaddr
= tp
->mmio_addr
;
1153 r8168dp_1_mdio_access(tp
, reg
, OCPDR_READ_CMD
);
1156 RTL_W32(OCPAR
, OCPAR_GPHY_READ_CMD
);
1157 RTL_W32(EPHY_RXER_NUM
, 0);
1159 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 1000, 100) ?
1160 RTL_R32(OCPDR
) & OCPDR_DATA_MASK
: ~0;
/* 8168dp (v2): MDIO access is gated by a bit in register 0xd0 that must
 * be cleared before and restored after each classic PHYAR transaction. */
1163 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1165 static void r8168dp_2_mdio_start(void __iomem
*ioaddr
)
1167 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT
);
1170 static void r8168dp_2_mdio_stop(void __iomem
*ioaddr
)
1172 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT
);
/* Bracket the classic r8169 MDIO write with the start/stop gate. */
1175 static void r8168dp_2_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
1177 void __iomem
*ioaddr
= tp
->mmio_addr
;
1179 r8168dp_2_mdio_start(ioaddr
);
1181 r8169_mdio_write(tp
, reg
, value
);
1183 r8168dp_2_mdio_stop(ioaddr
);
/* Same gating wrapper around the classic r8169 MDIO read. */
1186 static int r8168dp_2_mdio_read(struct rtl8169_private
*tp
, int reg
)
1188 void __iomem
*ioaddr
= tp
->mmio_addr
;
1191 r8168dp_2_mdio_start(ioaddr
);
1193 value
= r8169_mdio_read(tp
, reg
);
1195 r8168dp_2_mdio_stop(ioaddr
);
/* Dispatch a PHY register write through the per-chip mdio_ops vtable. */
1200 static void rtl_writephy(struct rtl8169_private
*tp
, int location
, u32 val
)
1202 tp
->mdio_ops
.write(tp
, location
, val
);
/* Dispatch a PHY register read through the per-chip mdio_ops vtable. */
1205 static int rtl_readphy(struct rtl8169_private
*tp
, int location
)
1207 return tp
->mdio_ops
.read(tp
, location
);
/* OR `value` into a PHY register (read-modify-write, set-only). */
1210 static void rtl_patchphy(struct rtl8169_private
*tp
, int reg_addr
, int value
)
1212 rtl_writephy(tp
, reg_addr
, rtl_readphy(tp
, reg_addr
) | value
);
/* "Write-1 / write-0": set bits in p, then clear bits in m. */
1215 static void rtl_w1w0_phy(struct rtl8169_private
*tp
, int reg_addr
, int p
, int m
)
1219 val
= rtl_readphy(tp
, reg_addr
);
1220 rtl_writephy(tp
, reg_addr
, (val
| p
) & ~m
);
/* mii_if_info adapter: net_device-based write (phy_id unused here). */
1223 static void rtl_mdio_write(struct net_device
*dev
, int phy_id
, int location
,
1226 struct rtl8169_private
*tp
= netdev_priv(dev
);
1228 rtl_writephy(tp
, location
, val
);
/* mii_if_info adapter: net_device-based read (phy_id unused here). */
1231 static int rtl_mdio_read(struct net_device
*dev
, int phy_id
, int location
)
1233 struct rtl8169_private
*tp
= netdev_priv(dev
);
1235 return rtl_readphy(tp
, location
);
/* Condition: EPHYAR busy flag still set. */
1238 DECLARE_RTL_COND(rtl_ephyar_cond
)
1240 void __iomem
*ioaddr
= tp
->mmio_addr
;
1242 return RTL_R32(EPHYAR
) & EPHYAR_FLAG
;
/* PCIe EPHY register write: command flag + data (low 16 bits) + register
 * address shifted into place; poll until the busy flag clears. */
1245 static void rtl_ephy_write(struct rtl8169_private
*tp
, int reg_addr
, int value
)
1247 void __iomem
*ioaddr
= tp
->mmio_addr
;
1249 RTL_W32(EPHYAR
, EPHYAR_WRITE_CMD
| (value
& EPHYAR_DATA_MASK
) |
1250 (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1252 rtl_udelay_loop_wait_low(tp
, &rtl_ephyar_cond
, 10, 100);
/* PCIe EPHY register read; ~0 on timeout. */
1257 static u16
rtl_ephy_read(struct rtl8169_private
*tp
, int reg_addr
)
1259 void __iomem
*ioaddr
= tp
->mmio_addr
;
1261 RTL_W32(EPHYAR
, (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1263 return rtl_udelay_loop_wait_high(tp
, &rtl_ephyar_cond
, 10, 100) ?
1264 RTL_R32(EPHYAR
) & EPHYAR_DATA_MASK
: ~0;
/* ERI (extended register interface) write: address must be 4-byte
 * aligned and a byte-enable mask supplied (enforced by BUG_ON); data
 * goes in ERIDR, the command in ERIAR, then poll for completion. */
1267 static void rtl_eri_write(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1270 void __iomem
*ioaddr
= tp
->mmio_addr
;
1272 BUG_ON((addr
& 3) || (mask
== 0));
1273 RTL_W32(ERIDR
, val
);
1274 RTL_W32(ERIAR
, ERIAR_WRITE_CMD
| type
| mask
| addr
);
1276 rtl_udelay_loop_wait_low(tp
, &rtl_eriar_cond
, 100, 100);
/* ERI read: always uses the full 1111 byte-enable mask; ~0 on timeout. */
1279 static u32
rtl_eri_read(struct rtl8169_private
*tp
, int addr
, int type
)
1281 void __iomem
*ioaddr
= tp
->mmio_addr
;
1283 RTL_W32(ERIAR
, ERIAR_READ_CMD
| type
| ERIAR_MASK_1111
| addr
);
1285 return rtl_udelay_loop_wait_high(tp
, &rtl_eriar_cond
, 100, 100) ?
1286 RTL_R32(ERIDR
) : ~0;
/* ERI read-modify-write: clear bits in m, then set bits in p. */
1289 static void rtl_w1w0_eri(struct rtl8169_private
*tp
, int addr
, u32 mask
, u32 p
,
1294 val
= rtl_eri_read(tp
, addr
, type
);
1295 rtl_eri_write(tp
, addr
, mask
, (val
& ~m
) | p
, type
);
/* Apply a batch of exgmac register writes (addr/mask/val triplets),
 * each routed to the EXGMAC function of the ERI interface. */
1304 static void rtl_write_exgmac_batch(struct rtl8169_private
*tp
,
1305 const struct exgmac_reg
*r
, int len
)
1308 rtl_eri_write(tp
, r
->addr
, r
->mask
, r
->val
, ERIAR_EXGMAC
);
/* Condition: EFUSEAR busy flag still set. */
1313 DECLARE_RTL_COND(rtl_efusear_cond
)
1315 void __iomem
*ioaddr
= tp
->mmio_addr
;
1317 return RTL_R32(EFUSEAR
) & EFUSEAR_FLAG
;
/* Read one byte from the 8168d eFuse array; ~0 on timeout (note the
 * longer 300-iteration poll — eFuse access is slow). */
1320 static u8
rtl8168d_efuse_read(struct rtl8169_private
*tp
, int reg_addr
)
1322 void __iomem
*ioaddr
= tp
->mmio_addr
;
1324 RTL_W32(EFUSEAR
, (reg_addr
& EFUSEAR_REG_MASK
) << EFUSEAR_REG_SHIFT
);
1326 return rtl_udelay_loop_wait_high(tp
, &rtl_efusear_cond
, 100, 300) ?
1327 RTL_R32(EFUSEAR
) & EFUSEAR_DATA_MASK
: ~0;
/* Snapshot the pending interrupt events from IntrStatus. */
1330 static u16
rtl_get_events(struct rtl8169_private
*tp
)
1332 void __iomem
*ioaddr
= tp
->mmio_addr
;
1334 return RTL_R16(IntrStatus
);
/* Acknowledge (clear) the given event bits by writing them back. */
1337 static void rtl_ack_events(struct rtl8169_private
*tp
, u16 bits
)
1339 void __iomem
*ioaddr
= tp
->mmio_addr
;
1341 RTL_W16(IntrStatus
, bits
);
/* Mask all interrupts by zeroing IntrMask. */
1345 static void rtl_irq_disable(struct rtl8169_private
*tp
)
1347 void __iomem
*ioaddr
= tp
->mmio_addr
;
1349 RTL_W16(IntrMask
, 0);
/* Unmask exactly the given event bits. */
1353 static void rtl_irq_enable(struct rtl8169_private
*tp
, u16 bits
)
1355 void __iomem
*ioaddr
= tp
->mmio_addr
;
1357 RTL_W16(IntrMask
, bits
);
/* Events serviced from the NAPI poll loop vs. the slow path. */
1360 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1361 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1362 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
/* Enable both NAPI events and the chip-specific slow events. */
1364 static void rtl_irq_enable_all(struct rtl8169_private
*tp
)
1366 rtl_irq_enable(tp
, RTL_EVENT_NAPI
| tp
->event_slow
);
/* Quiesce interrupts: mask everything, then ack anything pending. */
1369 static void rtl8169_irq_mask_and_ack(struct rtl8169_private
*tp
)
1371 void __iomem
*ioaddr
= tp
->mmio_addr
;
1373 rtl_irq_disable(tp
);
1374 rtl_ack_events(tp
, RTL_EVENT_NAPI
| tp
->event_slow
);
1378 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private
*tp
)
1380 void __iomem
*ioaddr
= tp
->mmio_addr
;
1382 return RTL_R32(TBICSR
) & TBIReset
;
1385 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private
*tp
)
1387 return rtl_readphy(tp
, MII_BMCR
) & BMCR_RESET
;
1390 static unsigned int rtl8169_tbi_link_ok(void __iomem
*ioaddr
)
1392 return RTL_R32(TBICSR
) & TBILinkOk
;
1395 static unsigned int rtl8169_xmii_link_ok(void __iomem
*ioaddr
)
1397 return RTL_R8(PHYstatus
) & LinkStatus
;
1400 static void rtl8169_tbi_reset_enable(struct rtl8169_private
*tp
)
1402 void __iomem
*ioaddr
= tp
->mmio_addr
;
1404 RTL_W32(TBICSR
, RTL_R32(TBICSR
) | TBIReset
);
1407 static void rtl8169_xmii_reset_enable(struct rtl8169_private
*tp
)
1411 val
= rtl_readphy(tp
, MII_BMCR
) | BMCR_RESET
;
1412 rtl_writephy(tp
, MII_BMCR
, val
& 0xffff);
1415 static void rtl_link_chg_patch(struct rtl8169_private
*tp
)
1417 void __iomem
*ioaddr
= tp
->mmio_addr
;
1418 struct net_device
*dev
= tp
->dev
;
1420 if (!netif_running(dev
))
1423 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
||
1424 tp
->mac_version
== RTL_GIGA_MAC_VER_38
) {
1425 if (RTL_R8(PHYstatus
) & _1000bpsF
) {
1426 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011,
1428 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1430 } else if (RTL_R8(PHYstatus
) & _100bps
) {
1431 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1433 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1436 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1438 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f,
1441 /* Reset packet filter */
1442 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x00, 0x01,
1444 rtl_w1w0_eri(tp
, 0xdc, ERIAR_MASK_0001
, 0x01, 0x00,
1446 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_35
||
1447 tp
->mac_version
== RTL_GIGA_MAC_VER_36
) {
1448 if (RTL_R8(PHYstatus
) & _1000bpsF
) {
1449 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011,
1451 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005,
1454 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f,
1456 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f,
1459 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_37
) {
1460 if (RTL_R8(PHYstatus
) & _10bps
) {
1461 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x4d02,
1463 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_0011
, 0x0060,
1466 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000,
1472 static void __rtl8169_check_link_status(struct net_device
*dev
,
1473 struct rtl8169_private
*tp
,
1474 void __iomem
*ioaddr
, bool pm
)
1476 if (tp
->link_ok(ioaddr
)) {
1477 rtl_link_chg_patch(tp
);
1478 /* This is to cancel a scheduled suspend if there's one. */
1480 pm_request_resume(&tp
->pci_dev
->dev
);
1481 netif_carrier_on(dev
);
1482 if (net_ratelimit())
1483 netif_info(tp
, ifup
, dev
, "link up\n");
1485 netif_carrier_off(dev
);
1486 netif_info(tp
, ifdown
, dev
, "link down\n");
1488 pm_schedule_suspend(&tp
->pci_dev
->dev
, 5000);
1492 static void rtl8169_check_link_status(struct net_device
*dev
,
1493 struct rtl8169_private
*tp
,
1494 void __iomem
*ioaddr
)
1496 __rtl8169_check_link_status(dev
, tp
, ioaddr
, false);
1499 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1501 static u32
__rtl8169_get_wol(struct rtl8169_private
*tp
)
1503 void __iomem
*ioaddr
= tp
->mmio_addr
;
1507 options
= RTL_R8(Config1
);
1508 if (!(options
& PMEnable
))
1511 options
= RTL_R8(Config3
);
1512 if (options
& LinkUp
)
1513 wolopts
|= WAKE_PHY
;
1514 if (options
& MagicPacket
)
1515 wolopts
|= WAKE_MAGIC
;
1517 options
= RTL_R8(Config5
);
1519 wolopts
|= WAKE_UCAST
;
1521 wolopts
|= WAKE_BCAST
;
1523 wolopts
|= WAKE_MCAST
;
1528 static void rtl8169_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1530 struct rtl8169_private
*tp
= netdev_priv(dev
);
1534 wol
->supported
= WAKE_ANY
;
1535 wol
->wolopts
= __rtl8169_get_wol(tp
);
1537 rtl_unlock_work(tp
);
1540 static void __rtl8169_set_wol(struct rtl8169_private
*tp
, u32 wolopts
)
1542 void __iomem
*ioaddr
= tp
->mmio_addr
;
1544 static const struct {
1549 { WAKE_PHY
, Config3
, LinkUp
},
1550 { WAKE_MAGIC
, Config3
, MagicPacket
},
1551 { WAKE_UCAST
, Config5
, UWF
},
1552 { WAKE_BCAST
, Config5
, BWF
},
1553 { WAKE_MCAST
, Config5
, MWF
},
1554 { WAKE_ANY
, Config5
, LanWake
}
1558 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
1560 for (i
= 0; i
< ARRAY_SIZE(cfg
); i
++) {
1561 options
= RTL_R8(cfg
[i
].reg
) & ~cfg
[i
].mask
;
1562 if (wolopts
& cfg
[i
].opt
)
1563 options
|= cfg
[i
].mask
;
1564 RTL_W8(cfg
[i
].reg
, options
);
1567 switch (tp
->mac_version
) {
1568 case RTL_GIGA_MAC_VER_01
... RTL_GIGA_MAC_VER_17
:
1569 options
= RTL_R8(Config1
) & ~PMEnable
;
1571 options
|= PMEnable
;
1572 RTL_W8(Config1
, options
);
1575 options
= RTL_R8(Config2
) & ~PME_SIGNAL
;
1577 options
|= PME_SIGNAL
;
1578 RTL_W8(Config2
, options
);
1582 RTL_W8(Cfg9346
, Cfg9346_Lock
);
1585 static int rtl8169_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1587 struct rtl8169_private
*tp
= netdev_priv(dev
);
1592 tp
->features
|= RTL_FEATURE_WOL
;
1594 tp
->features
&= ~RTL_FEATURE_WOL
;
1595 __rtl8169_set_wol(tp
, wol
->wolopts
);
1597 rtl_unlock_work(tp
);
1599 device_set_wakeup_enable(&tp
->pci_dev
->dev
, wol
->wolopts
);
1604 static const char *rtl_lookup_firmware_name(struct rtl8169_private
*tp
)
1606 return rtl_chip_infos
[tp
->mac_version
].fw_name
;
1609 static void rtl8169_get_drvinfo(struct net_device
*dev
,
1610 struct ethtool_drvinfo
*info
)
1612 struct rtl8169_private
*tp
= netdev_priv(dev
);
1613 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
1615 strlcpy(info
->driver
, MODULENAME
, sizeof(info
->driver
));
1616 strlcpy(info
->version
, RTL8169_VERSION
, sizeof(info
->version
));
1617 strlcpy(info
->bus_info
, pci_name(tp
->pci_dev
), sizeof(info
->bus_info
));
1618 BUILD_BUG_ON(sizeof(info
->fw_version
) < sizeof(rtl_fw
->version
));
1619 if (!IS_ERR_OR_NULL(rtl_fw
))
1620 strlcpy(info
->fw_version
, rtl_fw
->version
,
1621 sizeof(info
->fw_version
));
1624 static int rtl8169_get_regs_len(struct net_device
*dev
)
1626 return R8169_REGS_SIZE
;
1629 static int rtl8169_set_speed_tbi(struct net_device
*dev
,
1630 u8 autoneg
, u16 speed
, u8 duplex
, u32 ignored
)
1632 struct rtl8169_private
*tp
= netdev_priv(dev
);
1633 void __iomem
*ioaddr
= tp
->mmio_addr
;
1637 reg
= RTL_R32(TBICSR
);
1638 if ((autoneg
== AUTONEG_DISABLE
) && (speed
== SPEED_1000
) &&
1639 (duplex
== DUPLEX_FULL
)) {
1640 RTL_W32(TBICSR
, reg
& ~(TBINwEnable
| TBINwRestart
));
1641 } else if (autoneg
== AUTONEG_ENABLE
)
1642 RTL_W32(TBICSR
, reg
| TBINwEnable
| TBINwRestart
);
1644 netif_warn(tp
, link
, dev
,
1645 "incorrect speed setting refused in TBI mode\n");
1652 static int rtl8169_set_speed_xmii(struct net_device
*dev
,
1653 u8 autoneg
, u16 speed
, u8 duplex
, u32 adv
)
1655 struct rtl8169_private
*tp
= netdev_priv(dev
);
1656 int giga_ctrl
, bmcr
;
1659 rtl_writephy(tp
, 0x1f, 0x0000);
1661 if (autoneg
== AUTONEG_ENABLE
) {
1664 auto_nego
= rtl_readphy(tp
, MII_ADVERTISE
);
1665 auto_nego
&= ~(ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1666 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1668 if (adv
& ADVERTISED_10baseT_Half
)
1669 auto_nego
|= ADVERTISE_10HALF
;
1670 if (adv
& ADVERTISED_10baseT_Full
)
1671 auto_nego
|= ADVERTISE_10FULL
;
1672 if (adv
& ADVERTISED_100baseT_Half
)
1673 auto_nego
|= ADVERTISE_100HALF
;
1674 if (adv
& ADVERTISED_100baseT_Full
)
1675 auto_nego
|= ADVERTISE_100FULL
;
1677 auto_nego
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1679 giga_ctrl
= rtl_readphy(tp
, MII_CTRL1000
);
1680 giga_ctrl
&= ~(ADVERTISE_1000FULL
| ADVERTISE_1000HALF
);
1682 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1683 if (tp
->mii
.supports_gmii
) {
1684 if (adv
& ADVERTISED_1000baseT_Half
)
1685 giga_ctrl
|= ADVERTISE_1000HALF
;
1686 if (adv
& ADVERTISED_1000baseT_Full
)
1687 giga_ctrl
|= ADVERTISE_1000FULL
;
1688 } else if (adv
& (ADVERTISED_1000baseT_Half
|
1689 ADVERTISED_1000baseT_Full
)) {
1690 netif_info(tp
, link
, dev
,
1691 "PHY does not support 1000Mbps\n");
1695 bmcr
= BMCR_ANENABLE
| BMCR_ANRESTART
;
1697 rtl_writephy(tp
, MII_ADVERTISE
, auto_nego
);
1698 rtl_writephy(tp
, MII_CTRL1000
, giga_ctrl
);
1702 if (speed
== SPEED_10
)
1704 else if (speed
== SPEED_100
)
1705 bmcr
= BMCR_SPEED100
;
1709 if (duplex
== DUPLEX_FULL
)
1710 bmcr
|= BMCR_FULLDPLX
;
1713 rtl_writephy(tp
, MII_BMCR
, bmcr
);
1715 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
||
1716 tp
->mac_version
== RTL_GIGA_MAC_VER_03
) {
1717 if ((speed
== SPEED_100
) && (autoneg
!= AUTONEG_ENABLE
)) {
1718 rtl_writephy(tp
, 0x17, 0x2138);
1719 rtl_writephy(tp
, 0x0e, 0x0260);
1721 rtl_writephy(tp
, 0x17, 0x2108);
1722 rtl_writephy(tp
, 0x0e, 0x0000);
1731 static int rtl8169_set_speed(struct net_device
*dev
,
1732 u8 autoneg
, u16 speed
, u8 duplex
, u32 advertising
)
1734 struct rtl8169_private
*tp
= netdev_priv(dev
);
1737 ret
= tp
->set_speed(dev
, autoneg
, speed
, duplex
, advertising
);
1741 if (netif_running(dev
) && (autoneg
== AUTONEG_ENABLE
) &&
1742 (advertising
& ADVERTISED_1000baseT_Full
)) {
1743 mod_timer(&tp
->timer
, jiffies
+ RTL8169_PHY_TIMEOUT
);
1749 static int rtl8169_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1751 struct rtl8169_private
*tp
= netdev_priv(dev
);
1754 del_timer_sync(&tp
->timer
);
1757 ret
= rtl8169_set_speed(dev
, cmd
->autoneg
, ethtool_cmd_speed(cmd
),
1758 cmd
->duplex
, cmd
->advertising
);
1759 rtl_unlock_work(tp
);
1764 static netdev_features_t
rtl8169_fix_features(struct net_device
*dev
,
1765 netdev_features_t features
)
1767 struct rtl8169_private
*tp
= netdev_priv(dev
);
1769 if (dev
->mtu
> TD_MSS_MAX
)
1770 features
&= ~NETIF_F_ALL_TSO
;
1772 if (dev
->mtu
> JUMBO_1K
&&
1773 !rtl_chip_infos
[tp
->mac_version
].jumbo_tx_csum
)
1774 features
&= ~NETIF_F_IP_CSUM
;
1779 static void __rtl8169_set_features(struct net_device
*dev
,
1780 netdev_features_t features
)
1782 struct rtl8169_private
*tp
= netdev_priv(dev
);
1783 netdev_features_t changed
= features
^ dev
->features
;
1784 void __iomem
*ioaddr
= tp
->mmio_addr
;
1786 if (!(changed
& (NETIF_F_RXALL
| NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_RX
)))
1789 if (changed
& (NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_RX
)) {
1790 if (features
& NETIF_F_RXCSUM
)
1791 tp
->cp_cmd
|= RxChkSum
;
1793 tp
->cp_cmd
&= ~RxChkSum
;
1795 if (dev
->features
& NETIF_F_HW_VLAN_RX
)
1796 tp
->cp_cmd
|= RxVlan
;
1798 tp
->cp_cmd
&= ~RxVlan
;
1800 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
1803 if (changed
& NETIF_F_RXALL
) {
1804 int tmp
= (RTL_R32(RxConfig
) & ~(AcceptErr
| AcceptRunt
));
1805 if (features
& NETIF_F_RXALL
)
1806 tmp
|= (AcceptErr
| AcceptRunt
);
1807 RTL_W32(RxConfig
, tmp
);
1811 static int rtl8169_set_features(struct net_device
*dev
,
1812 netdev_features_t features
)
1814 struct rtl8169_private
*tp
= netdev_priv(dev
);
1817 __rtl8169_set_features(dev
, features
);
1818 rtl_unlock_work(tp
);
1824 static inline u32
rtl8169_tx_vlan_tag(struct rtl8169_private
*tp
,
1825 struct sk_buff
*skb
)
1827 return (vlan_tx_tag_present(skb
)) ?
1828 TxVlanTag
| swab16(vlan_tx_tag_get(skb
)) : 0x00;
/*
 * Attach a hardware-extracted VLAN tag to @skb.  The tag lives in the
 * low 16 bits of the descriptor's opts2 field, byte-swapped.
 */
1831 static void rtl8169_rx_vlan_tag(struct RxDesc
*desc
, struct sk_buff
*skb
)
1833 u32 opts2
= le32_to_cpu(desc
->opts2
);
/* Only valid when the chip set RxVlanTag in opts2. */
1835 if (opts2
& RxVlanTag
)
1836 __vlan_hwaccel_put_tag(skb
, swab16(opts2
& 0xffff));
1841 static int rtl8169_gset_tbi(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1843 struct rtl8169_private
*tp
= netdev_priv(dev
);
1844 void __iomem
*ioaddr
= tp
->mmio_addr
;
1848 SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg
| SUPPORTED_FIBRE
;
1849 cmd
->port
= PORT_FIBRE
;
1850 cmd
->transceiver
= XCVR_INTERNAL
;
1852 status
= RTL_R32(TBICSR
);
1853 cmd
->advertising
= (status
& TBINwEnable
) ? ADVERTISED_Autoneg
: 0;
1854 cmd
->autoneg
= !!(status
& TBINwEnable
);
1856 ethtool_cmd_speed_set(cmd
, SPEED_1000
);
1857 cmd
->duplex
= DUPLEX_FULL
; /* Always set */
1862 static int rtl8169_gset_xmii(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1864 struct rtl8169_private
*tp
= netdev_priv(dev
);
1866 return mii_ethtool_gset(&tp
->mii
, cmd
);
1869 static int rtl8169_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1871 struct rtl8169_private
*tp
= netdev_priv(dev
);
1875 rc
= tp
->get_settings(dev
, cmd
);
1876 rtl_unlock_work(tp
);
1881 static void rtl8169_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
1884 struct rtl8169_private
*tp
= netdev_priv(dev
);
1886 if (regs
->len
> R8169_REGS_SIZE
)
1887 regs
->len
= R8169_REGS_SIZE
;
1890 memcpy_fromio(p
, tp
->mmio_addr
, regs
->len
);
1891 rtl_unlock_work(tp
);
1894 static u32
rtl8169_get_msglevel(struct net_device
*dev
)
1896 struct rtl8169_private
*tp
= netdev_priv(dev
);
1898 return tp
->msg_enable
;
1901 static void rtl8169_set_msglevel(struct net_device
*dev
, u32 value
)
1903 struct rtl8169_private
*tp
= netdev_priv(dev
);
1905 tp
->msg_enable
= value
;
/*
 * ethtool statistics names.  Their order presumably matches the data[]
 * indices filled in rtl8169_get_ethtool_stats() - verify before editing.
 */
1908 static const char rtl8169_gstrings
[][ETH_GSTRING_LEN
] = {
1915 "tx_single_collisions",
1916 "tx_multi_collisions",
1924 static int rtl8169_get_sset_count(struct net_device
*dev
, int sset
)
1928 return ARRAY_SIZE(rtl8169_gstrings
);
1934 DECLARE_RTL_COND(rtl_counters_cond
)
1936 void __iomem
*ioaddr
= tp
->mmio_addr
;
1938 return RTL_R32(CounterAddrLow
) & CounterDump
;
1941 static void rtl8169_update_counters(struct net_device
*dev
)
1943 struct rtl8169_private
*tp
= netdev_priv(dev
);
1944 void __iomem
*ioaddr
= tp
->mmio_addr
;
1945 struct device
*d
= &tp
->pci_dev
->dev
;
1946 struct rtl8169_counters
*counters
;
1951 * Some chips are unable to dump tally counters when the receiver
1954 if ((RTL_R8(ChipCmd
) & CmdRxEnb
) == 0)
1957 counters
= dma_alloc_coherent(d
, sizeof(*counters
), &paddr
, GFP_KERNEL
);
1961 RTL_W32(CounterAddrHigh
, (u64
)paddr
>> 32);
1962 cmd
= (u64
)paddr
& DMA_BIT_MASK(32);
1963 RTL_W32(CounterAddrLow
, cmd
);
1964 RTL_W32(CounterAddrLow
, cmd
| CounterDump
);
1966 if (rtl_udelay_loop_wait_low(tp
, &rtl_counters_cond
, 10, 1000))
1967 memcpy(&tp
->counters
, counters
, sizeof(*counters
));
1969 RTL_W32(CounterAddrLow
, 0);
1970 RTL_W32(CounterAddrHigh
, 0);
1972 dma_free_coherent(d
, sizeof(*counters
), counters
, paddr
);
1975 static void rtl8169_get_ethtool_stats(struct net_device
*dev
,
1976 struct ethtool_stats
*stats
, u64
*data
)
1978 struct rtl8169_private
*tp
= netdev_priv(dev
);
1982 rtl8169_update_counters(dev
);
1984 data
[0] = le64_to_cpu(tp
->counters
.tx_packets
);
1985 data
[1] = le64_to_cpu(tp
->counters
.rx_packets
);
1986 data
[2] = le64_to_cpu(tp
->counters
.tx_errors
);
1987 data
[3] = le32_to_cpu(tp
->counters
.rx_errors
);
1988 data
[4] = le16_to_cpu(tp
->counters
.rx_missed
);
1989 data
[5] = le16_to_cpu(tp
->counters
.align_errors
);
1990 data
[6] = le32_to_cpu(tp
->counters
.tx_one_collision
);
1991 data
[7] = le32_to_cpu(tp
->counters
.tx_multi_collision
);
1992 data
[8] = le64_to_cpu(tp
->counters
.rx_unicast
);
1993 data
[9] = le64_to_cpu(tp
->counters
.rx_broadcast
);
1994 data
[10] = le32_to_cpu(tp
->counters
.rx_multicast
);
1995 data
[11] = le16_to_cpu(tp
->counters
.tx_aborted
);
1996 data
[12] = le16_to_cpu(tp
->counters
.tx_underun
);
1999 static void rtl8169_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
2003 memcpy(data
, *rtl8169_gstrings
, sizeof(rtl8169_gstrings
));
2008 static const struct ethtool_ops rtl8169_ethtool_ops
= {
2009 .get_drvinfo
= rtl8169_get_drvinfo
,
2010 .get_regs_len
= rtl8169_get_regs_len
,
2011 .get_link
= ethtool_op_get_link
,
2012 .get_settings
= rtl8169_get_settings
,
2013 .set_settings
= rtl8169_set_settings
,
2014 .get_msglevel
= rtl8169_get_msglevel
,
2015 .set_msglevel
= rtl8169_set_msglevel
,
2016 .get_regs
= rtl8169_get_regs
,
2017 .get_wol
= rtl8169_get_wol
,
2018 .set_wol
= rtl8169_set_wol
,
2019 .get_strings
= rtl8169_get_strings
,
2020 .get_sset_count
= rtl8169_get_sset_count
,
2021 .get_ethtool_stats
= rtl8169_get_ethtool_stats
,
2022 .get_ts_info
= ethtool_op_get_ts_info
,
/*
 * Identify the chip revision by matching TxConfig against the mask/val
 * table below (first match wins; the table is ordered newest-first and
 * terminated by the all-zero RTL_GIGA_MAC_NONE entry, which matches
 * everything).  Falls back to @default_version with a notice when no
 * specific entry matches.
 */
2025 static void rtl8169_get_mac_version(struct rtl8169_private
*tp
,
2026 struct net_device
*dev
, u8 default_version
)
2028 void __iomem
*ioaddr
= tp
->mmio_addr
;
2030 * The driver currently handles the 8168Bf and the 8168Be identically
2031 * but they can be identified more specifically through the test below
2034 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2036 * Same thing for the 8101Eb and the 8101Ec:
2038 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2040 static const struct rtl_mac_info
{
2046 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41
},
2047 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40
},
2050 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38
},
2051 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36
},
2052 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35
},
2055 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34
},
2056 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33
},
2057 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32
},
2058 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33
},
2061 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26
},
2062 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25
},
2063 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26
},
2065 /* 8168DP family. */
2066 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27
},
2067 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28
},
2068 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31
},
2071 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24
},
2072 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23
},
2073 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18
},
2074 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24
},
2075 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19
},
2076 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20
},
2077 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21
},
2078 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22
},
2079 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22
},
2082 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12
},
2083 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17
},
2084 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17
},
2085 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11
},
2088 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39
},
2089 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39
},
2090 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37
},
2091 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30
},
2092 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30
},
2093 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29
},
2094 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30
},
2095 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09
},
2096 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09
},
2097 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08
},
2098 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08
},
2099 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07
},
2100 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07
},
2101 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13
},
2102 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10
},
2103 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16
},
2104 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09
},
2105 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09
},
2106 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16
},
2107 /* FIXME: where did these entries come from ? -- FR */
2108 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15
},
2109 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14
},
2112 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06
},
2113 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05
},
2114 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04
},
2115 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03
},
2116 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02
},
2117 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01
},
2120 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE
}
2122 const struct rtl_mac_info
*p
= mac_info
;
/* Walk the table until (TxConfig & mask) == val. */
2125 reg
= RTL_R32(TxConfig
);
2126 while ((reg
& p
->mask
) != p
->val
)
2128 tp
->mac_version
= p
->mac_version
;
2130 if (tp
->mac_version
== RTL_GIGA_MAC_NONE
) {
2131 netif_notice(tp
, probe
, dev
,
2132 "unknown MAC, using family default\n");
2133 tp
->mac_version
= default_version
;
2137 static void rtl8169_print_mac_version(struct rtl8169_private
*tp
)
2139 dprintk("mac_version = 0x%02x\n", tp
->mac_version
);
2147 static void rtl_writephy_batch(struct rtl8169_private
*tp
,
2148 const struct phy_reg
*regs
, int len
)
2151 rtl_writephy(tp
, regs
->reg
, regs
->val
);
2156 #define PHY_READ 0x00000000
2157 #define PHY_DATA_OR 0x10000000
2158 #define PHY_DATA_AND 0x20000000
2159 #define PHY_BJMPN 0x30000000
2160 #define PHY_READ_EFUSE 0x40000000
2161 #define PHY_READ_MAC_BYTE 0x50000000
2162 #define PHY_WRITE_MAC_BYTE 0x60000000
2163 #define PHY_CLEAR_READCOUNT 0x70000000
2164 #define PHY_WRITE 0x80000000
2165 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2166 #define PHY_COMP_EQ_SKIPN 0xa0000000
2167 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2168 #define PHY_WRITE_PREVIOUS 0xc0000000
2169 #define PHY_SKIPN 0xd0000000
2170 #define PHY_DELAY_MS 0xe0000000
2171 #define PHY_WRITE_ERI_WORD 0xf0000000
2175 char version
[RTL_VER_SIZE
];
2181 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate the container format of a loaded firmware blob and locate
 * the embedded PHY opcode stream in rtl_fw->phy_action.  Two formats
 * are handled: a headered one (fw_info with magic == 0, checksum,
 * start offset, length and a version string) and a raw one (the whole
 * blob is opcodes; the version falls back to the lookup table name).
 */
2183 static bool rtl_fw_format_ok(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2185 const struct firmware
*fw
= rtl_fw
->fw
;
2186 struct fw_info
*fw_info
= (struct fw_info
*)fw
->data
;
2187 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2188 char *version
= rtl_fw
->version
;
/* Reject blobs too small to hold even a single opcode. */
2191 if (fw
->size
< FW_OPCODE_SIZE
)
/* Headered ("new") firmware format. */
2194 if (!fw_info
->magic
) {
2195 size_t i
, size
, start
;
2198 if (fw
->size
< sizeof(*fw_info
))
/* Whole-file checksum must validate before trusting the header. */
2201 for (i
= 0; i
< fw
->size
; i
++)
2202 checksum
+= fw
->data
[i
];
2206 start
= le32_to_cpu(fw_info
->fw_start
);
2207 if (start
> fw
->size
)
2210 size
= le32_to_cpu(fw_info
->fw_len
);
2211 if (size
> (fw
->size
- start
) / FW_OPCODE_SIZE
)
2214 memcpy(version
, fw_info
->version
, RTL_VER_SIZE
);
2216 pa
->code
= (__le32
*)(fw
->data
+ start
);
/* Raw ("old") format: the blob must be a whole number of opcodes. */
2219 if (fw
->size
% FW_OPCODE_SIZE
)
2222 strlcpy(version
, rtl_lookup_firmware_name(tp
), RTL_VER_SIZE
);
2224 pa
->code
= (__le32
*)fw
->data
;
2225 pa
->size
= fw
->size
/ FW_OPCODE_SIZE
;
/* Ensure the version string is always NUL-terminated. */
2227 version
[RTL_VER_SIZE
- 1] = 0;
/*
 * Statically verify the PHY opcode stream: every jump/skip target must
 * stay inside the program, and opcodes the interpreter does not
 * implement (MAC byte / ERI word accesses) are rejected outright.
 */
2234 static bool rtl_fw_data_ok(struct rtl8169_private
*tp
, struct net_device
*dev
,
2235 struct rtl_fw_phy_action
*pa
)
2240 for (index
= 0; index
< pa
->size
; index
++) {
2241 u32 action
= le32_to_cpu(pa
->code
[index
]);
2242 u32 regno
= (action
& 0x0fff0000) >> 16;
/* Dispatch on the opcode in the top nibble. */
2244 switch(action
& 0xf0000000) {
2248 case PHY_READ_EFUSE
:
2249 case PHY_CLEAR_READCOUNT
:
2251 case PHY_WRITE_PREVIOUS
:
/* Backward jumps must not reach before the program start. */
2256 if (regno
> index
) {
2257 netif_err(tp
, ifup
, tp
->dev
,
2258 "Out of range of firmware\n");
2262 case PHY_READCOUNT_EQ_SKIP
:
2263 if (index
+ 2 >= pa
->size
) {
2264 netif_err(tp
, ifup
, tp
->dev
,
2265 "Out of range of firmware\n");
2269 case PHY_COMP_EQ_SKIPN
:
2270 case PHY_COMP_NEQ_SKIPN
:
/* Forward skips must land inside the program. */
2272 if (index
+ 1 + regno
>= pa
->size
) {
2273 netif_err(tp
, ifup
, tp
->dev
,
2274 "Out of range of firmware\n");
/* Unsupported opcodes: fail validation. */
2279 case PHY_READ_MAC_BYTE
:
2280 case PHY_WRITE_MAC_BYTE
:
2281 case PHY_WRITE_ERI_WORD
:
2283 netif_err(tp
, ifup
, tp
->dev
,
2284 "Invalid action 0x%08x\n", action
);
2293 static int rtl_check_firmware(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2295 struct net_device
*dev
= tp
->dev
;
2298 if (!rtl_fw_format_ok(tp
, rtl_fw
)) {
2299 netif_err(tp
, ifup
, dev
, "invalid firwmare\n");
2303 if (rtl_fw_data_ok(tp
, dev
, &rtl_fw
->phy_action
))
/*
 * Interpreter for the firmware's PHY opcode stream (already validated
 * by rtl_fw_data_ok).  State: @predata holds the last value read,
 * @count is the read counter used by PHY_READCOUNT_EQ_SKIP.
 */
2309 static void rtl_phy_write_fw(struct rtl8169_private
*tp
, struct rtl_fw
*rtl_fw
)
2311 struct rtl_fw_phy_action
*pa
= &rtl_fw
->phy_action
;
2315 predata
= count
= 0;
/* Note: index advances inside the loop body, not in the for header. */
2317 for (index
= 0; index
< pa
->size
; ) {
2318 u32 action
= le32_to_cpu(pa
->code
[index
]);
2319 u32 data
= action
& 0x0000ffff;
2320 u32 regno
= (action
& 0x0fff0000) >> 16;
2325 switch(action
& 0xf0000000) {
2327 predata
= rtl_readphy(tp
, regno
);
2342 case PHY_READ_EFUSE
:
2343 predata
= rtl8168d_efuse_read(tp
, regno
);
2346 case PHY_CLEAR_READCOUNT
:
2351 rtl_writephy(tp
, regno
, data
);
/* Skip one extra opcode when the read counter equals @data. */
2354 case PHY_READCOUNT_EQ_SKIP
:
2355 index
+= (count
== data
) ? 2 : 1;
2357 case PHY_COMP_EQ_SKIPN
:
2358 if (predata
== data
)
2362 case PHY_COMP_NEQ_SKIPN
:
2363 if (predata
!= data
)
2367 case PHY_WRITE_PREVIOUS
:
2368 rtl_writephy(tp
, regno
, predata
);
/* Rejected during validation; never reached at runtime. */
2379 case PHY_READ_MAC_BYTE
:
2380 case PHY_WRITE_MAC_BYTE
:
2381 case PHY_WRITE_ERI_WORD
:
2388 static void rtl_release_firmware(struct rtl8169_private
*tp
)
2390 if (!IS_ERR_OR_NULL(tp
->rtl_fw
)) {
2391 release_firmware(tp
->rtl_fw
->fw
);
2394 tp
->rtl_fw
= RTL_FIRMWARE_UNKNOWN
;
2397 static void rtl_apply_firmware(struct rtl8169_private
*tp
)
2399 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
2401 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2402 if (!IS_ERR_OR_NULL(rtl_fw
))
2403 rtl_phy_write_fw(tp
, rtl_fw
);
2406 static void rtl_apply_firmware_cond(struct rtl8169_private
*tp
, u8 reg
, u16 val
)
2408 if (rtl_readphy(tp
, reg
) != val
)
2409 netif_warn(tp
, hw
, tp
->dev
, "chipset not ready for firmware\n");
2411 rtl_apply_firmware(tp
);
/* Load the RTL8169s PHY init table (table body elided in this view). */
2414 static void rtl8169s_hw_phy_config(struct rtl8169_private
*tp
)
2416 static const struct phy_reg phy_reg_init
[] = {
2478 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* Load the RTL8169sb PHY init table (table body elided in this view). */
2481 static void rtl8169sb_hw_phy_config(struct rtl8169_private
*tp
)
2483 static const struct phy_reg phy_reg_init
[] = {
2489 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2492 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private
*tp
)
2494 struct pci_dev
*pdev
= tp
->pci_dev
;
2496 if ((pdev
->subsystem_vendor
!= PCI_VENDOR_ID_GIGABYTE
) ||
2497 (pdev
->subsystem_device
!= 0xe000))
2500 rtl_writephy(tp
, 0x1f, 0x0001);
2501 rtl_writephy(tp
, 0x10, 0xf01b);
2502 rtl_writephy(tp
, 0x1f, 0x0000);
/* RTL8169scd PHY init table plus the GIGABYTE board quirk. */
2505 static void rtl8169scd_hw_phy_config(struct rtl8169_private
*tp
)
2507 static const struct phy_reg phy_reg_init
[] = {
2547 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2549 rtl8169scd_hw_phy_config_quirk(tp
);
/* Load the RTL8169sce PHY init table (table body elided in this view). */
2552 static void rtl8169sce_hw_phy_config(struct rtl8169_private
*tp
)
2554 static const struct phy_reg phy_reg_init
[] = {
2602 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* RTL8168bb PHY setup: patch register 0x16 bit 0, then the init table. */
2605 static void rtl8168bb_hw_phy_config(struct rtl8169_private
*tp
)
2607 static const struct phy_reg phy_reg_init
[] = {
2612 rtl_writephy(tp
, 0x1f, 0x0001);
2613 rtl_patchphy(tp
, 0x16, 1 << 0);
2615 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* Load the RTL8168be/bf PHY init table (table body elided in this view). */
2618 static void rtl8168bef_hw_phy_config(struct rtl8169_private
*tp
)
2620 static const struct phy_reg phy_reg_init
[] = {
2626 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* Load the RTL8168cp (rev 1) PHY init table (body elided in this view). */
2629 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private
*tp
)
2631 static const struct phy_reg phy_reg_init
[] = {
2639 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* RTL8168cp (rev 2) PHY setup: bit patches on 0x14/0x0d, then the table. */
2642 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private
*tp
)
2644 static const struct phy_reg phy_reg_init
[] = {
2650 rtl_writephy(tp
, 0x1f, 0x0000);
2651 rtl_patchphy(tp
, 0x14, 1 << 5);
2652 rtl_patchphy(tp
, 0x0d, 1 << 5);
2654 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* RTL8168c (rev 1) PHY setup: init table, then bit patches on 0x14/0x0d. */
2657 static void rtl8168c_1_hw_phy_config(struct rtl8169_private
*tp
)
2659 static const struct phy_reg phy_reg_init
[] = {
2679 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2681 rtl_patchphy(tp
, 0x14, 1 << 5);
2682 rtl_patchphy(tp
, 0x0d, 1 << 5);
2683 rtl_writephy(tp
, 0x1f, 0x0000);
/* RTL8168c (rev 2) PHY setup: init table plus 0x16/0x14/0x0d bit patches. */
2686 static void rtl8168c_2_hw_phy_config(struct rtl8169_private
*tp
)
2688 static const struct phy_reg phy_reg_init
[] = {
2706 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2708 rtl_patchphy(tp
, 0x16, 1 << 0);
2709 rtl_patchphy(tp
, 0x14, 1 << 5);
2710 rtl_patchphy(tp
, 0x0d, 1 << 5);
2711 rtl_writephy(tp
, 0x1f, 0x0000);
/* RTL8168c (rev 3) PHY setup: init table plus 0x16/0x14/0x0d bit patches. */
2714 static void rtl8168c_3_hw_phy_config(struct rtl8169_private
*tp
)
2716 static const struct phy_reg phy_reg_init
[] = {
2728 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2730 rtl_patchphy(tp
, 0x16, 1 << 0);
2731 rtl_patchphy(tp
, 0x14, 1 << 5);
2732 rtl_patchphy(tp
, 0x0d, 1 << 5);
2733 rtl_writephy(tp
, 0x1f, 0x0000);
/* The rev-4 8168c shares the rev-3 PHY configuration. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * RTL8168d (rev 1) PHY bring-up: base init table, regulator fine tune,
 * then an e-fuse dependent branch (0x01 == 0xb1 selects one register
 * set, anything else the other), PLL tuning, and conditional firmware.
 */
2741 static void rtl8168d_1_hw_phy_config(struct rtl8169_private
*tp
)
2743 static const struct phy_reg phy_reg_init_0
[] = {
2744 /* Channel Estimation */
2765 * Enhance line driver power
2774 * Can not link to 1Gbps with bad cable
2775 * Decrease SNR threshold form 21.07dB to 19.04dB
2784 rtl_writephy_batch(tp
, phy_reg_init_0
, ARRAY_SIZE(phy_reg_init_0
));
2788 * Fine Tune Switching regulator parameter
2790 rtl_writephy(tp
, 0x1f, 0x0002);
2791 rtl_w1w0_phy(tp
, 0x0b, 0x0010, 0x00ef);
2792 rtl_w1w0_phy(tp
, 0x0c, 0xa200, 0x5d00);
/* Branch on the e-fuse calibration byte. */
2794 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
2795 static const struct phy_reg phy_reg_init
[] = {
2805 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2807 val
= rtl_readphy(tp
, 0x0d);
2809 if ((val
& 0x00ff) != 0x006c) {
2810 static const u32 set
[] = {
2811 0x0065, 0x0066, 0x0067, 0x0068,
2812 0x0069, 0x006a, 0x006b, 0x006c
2816 rtl_writephy(tp
, 0x1f, 0x0002);
2819 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
2820 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
2823 static const struct phy_reg phy_reg_init
[] = {
2831 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2834 /* RSET couple improve */
2835 rtl_writephy(tp
, 0x1f, 0x0002);
2836 rtl_patchphy(tp
, 0x0d, 0x0300);
2837 rtl_patchphy(tp
, 0x0f, 0x0010);
2839 /* Fine tune PLL performance */
2840 rtl_writephy(tp
, 0x1f, 0x0002);
2841 rtl_w1w0_phy(tp
, 0x02, 0x0100, 0x0600);
2842 rtl_w1w0_phy(tp
, 0x03, 0x0000, 0xe000);
2844 rtl_writephy(tp
, 0x1f, 0x0005);
2845 rtl_writephy(tp
, 0x05, 0x001b);
/* Firmware is only applied when MII_EXPANSION reads back 0xbf00. */
2847 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xbf00);
2849 rtl_writephy(tp
, 0x1f, 0x0000);
/*
 * RTL8168d (rev 2) PHY bring-up: same overall shape as rev 1 (base
 * table, e-fuse branch, PLL tuning) but different regulator handling
 * and a different firmware readiness value (0xb300).
 */
2852 static void rtl8168d_2_hw_phy_config(struct rtl8169_private
*tp
)
2854 static const struct phy_reg phy_reg_init_0
[] = {
2855 /* Channel Estimation */
2876 * Enhance line driver power
2885 * Can not link to 1Gbps with bad cable
2886 * Decrease SNR threshold form 21.07dB to 19.04dB
2895 rtl_writephy_batch(tp
, phy_reg_init_0
, ARRAY_SIZE(phy_reg_init_0
));
/* Branch on the e-fuse calibration byte. */
2897 if (rtl8168d_efuse_read(tp
, 0x01) == 0xb1) {
2898 static const struct phy_reg phy_reg_init
[] = {
2909 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2911 val
= rtl_readphy(tp
, 0x0d);
2912 if ((val
& 0x00ff) != 0x006c) {
2913 static const u32 set
[] = {
2914 0x0065, 0x0066, 0x0067, 0x0068,
2915 0x0069, 0x006a, 0x006b, 0x006c
2919 rtl_writephy(tp
, 0x1f, 0x0002);
2922 for (i
= 0; i
< ARRAY_SIZE(set
); i
++)
2923 rtl_writephy(tp
, 0x0d, val
| set
[i
]);
2926 static const struct phy_reg phy_reg_init
[] = {
2934 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
2937 /* Fine tune PLL performance */
2938 rtl_writephy(tp
, 0x1f, 0x0002);
2939 rtl_w1w0_phy(tp
, 0x02, 0x0100, 0x0600);
2940 rtl_w1w0_phy(tp
, 0x03, 0x0000, 0xe000);
2942 /* Switching regulator Slew rate */
2943 rtl_writephy(tp
, 0x1f, 0x0002);
2944 rtl_patchphy(tp
, 0x0f, 0x0017);
2946 rtl_writephy(tp
, 0x1f, 0x0005);
2947 rtl_writephy(tp
, 0x05, 0x001b);
/* Firmware is only applied when MII_EXPANSION reads back 0xb300. */
2949 rtl_apply_firmware_cond(tp
, MII_EXPANSION
, 0xb300);
2951 rtl_writephy(tp
, 0x1f, 0x0000);
/* Load the RTL8168d (rev 3) PHY init table (body elided in this view). */
2954 static void rtl8168d_3_hw_phy_config(struct rtl8169_private
*tp
)
2956 static const struct phy_reg phy_reg_init
[] = {
3012 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
/* RTL8168d (rev 4) PHY setup: init table plus a bit patch on 0x0d. */
3015 static void rtl8168d_4_hw_phy_config(struct rtl8169_private
*tp
)
3017 static const struct phy_reg phy_reg_init
[] = {
3027 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3028 rtl_patchphy(tp
, 0x0d, 1 << 5);
/*
 * RTL8168e (rev 1) PHY bring-up: firmware first, then the init table,
 * followed by a long sequence of page-selected (0x1f) register writes
 * for power/impedance/speed-down tuning.  Values are vendor magic.
 */
3031 static void rtl8168e_1_hw_phy_config(struct rtl8169_private
*tp
)
3033 static const struct phy_reg phy_reg_init
[] = {
3034 /* Enable Delay cap */
3040 /* Channel estimation fine tune */
3049 /* Update PFM & 10M TX idle timer */
3061 rtl_apply_firmware(tp
);
3063 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3065 /* DCO enable for 10M IDLE Power */
3066 rtl_writephy(tp
, 0x1f, 0x0007);
3067 rtl_writephy(tp
, 0x1e, 0x0023);
3068 rtl_w1w0_phy(tp
, 0x17, 0x0006, 0x0000);
3069 rtl_writephy(tp
, 0x1f, 0x0000);
3071 /* For impedance matching */
3072 rtl_writephy(tp
, 0x1f, 0x0002);
3073 rtl_w1w0_phy(tp
, 0x08, 0x8000, 0x7f00);
3074 rtl_writephy(tp
, 0x1f, 0x0000);
3076 /* PHY auto speed down */
3077 rtl_writephy(tp
, 0x1f, 0x0007);
3078 rtl_writephy(tp
, 0x1e, 0x002d);
3079 rtl_w1w0_phy(tp
, 0x18, 0x0050, 0x0000);
3080 rtl_writephy(tp
, 0x1f, 0x0000);
3081 rtl_w1w0_phy(tp
, 0x14, 0x8000, 0x0000);
3083 rtl_writephy(tp
, 0x1f, 0x0005);
3084 rtl_writephy(tp
, 0x05, 0x8b86);
3085 rtl_w1w0_phy(tp
, 0x06, 0x0001, 0x0000);
3086 rtl_writephy(tp
, 0x1f, 0x0000);
3088 rtl_writephy(tp
, 0x1f, 0x0005);
3089 rtl_writephy(tp
, 0x05, 0x8b85);
3090 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3091 rtl_writephy(tp
, 0x1f, 0x0007);
3092 rtl_writephy(tp
, 0x1e, 0x0020);
3093 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x1100);
3094 rtl_writephy(tp
, 0x1f, 0x0006);
3095 rtl_writephy(tp
, 0x00, 0x5a00);
3096 rtl_writephy(tp
, 0x1f, 0x0000);
3097 rtl_writephy(tp
, 0x0d, 0x0007);
3098 rtl_writephy(tp
, 0x0e, 0x003c);
3099 rtl_writephy(tp
, 0x0d, 0x4007);
3100 rtl_writephy(tp
, 0x0e, 0x0000);
3101 rtl_writephy(tp
, 0x0d, 0x0000);
3104 static void rtl8168e_2_hw_phy_config(struct rtl8169_private
*tp
)
3106 static const struct phy_reg phy_reg_init
[] = {
3107 /* Enable Delay cap */
3116 /* Channel estimation fine tune */
3133 rtl_apply_firmware(tp
);
3135 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3137 /* For 4-corner performance improve */
3138 rtl_writephy(tp
, 0x1f, 0x0005);
3139 rtl_writephy(tp
, 0x05, 0x8b80);
3140 rtl_w1w0_phy(tp
, 0x17, 0x0006, 0x0000);
3141 rtl_writephy(tp
, 0x1f, 0x0000);
3143 /* PHY auto speed down */
3144 rtl_writephy(tp
, 0x1f, 0x0004);
3145 rtl_writephy(tp
, 0x1f, 0x0007);
3146 rtl_writephy(tp
, 0x1e, 0x002d);
3147 rtl_w1w0_phy(tp
, 0x18, 0x0010, 0x0000);
3148 rtl_writephy(tp
, 0x1f, 0x0002);
3149 rtl_writephy(tp
, 0x1f, 0x0000);
3150 rtl_w1w0_phy(tp
, 0x14, 0x8000, 0x0000);
3152 /* improve 10M EEE waveform */
3153 rtl_writephy(tp
, 0x1f, 0x0005);
3154 rtl_writephy(tp
, 0x05, 0x8b86);
3155 rtl_w1w0_phy(tp
, 0x06, 0x0001, 0x0000);
3156 rtl_writephy(tp
, 0x1f, 0x0000);
3158 /* Improve 2-pair detection performance */
3159 rtl_writephy(tp
, 0x1f, 0x0005);
3160 rtl_writephy(tp
, 0x05, 0x8b85);
3161 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3162 rtl_writephy(tp
, 0x1f, 0x0000);
3165 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_1111
, 0x0000, 0x0003, ERIAR_EXGMAC
);
3166 rtl_writephy(tp
, 0x1f, 0x0005);
3167 rtl_writephy(tp
, 0x05, 0x8b85);
3168 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3169 rtl_writephy(tp
, 0x1f, 0x0004);
3170 rtl_writephy(tp
, 0x1f, 0x0007);
3171 rtl_writephy(tp
, 0x1e, 0x0020);
3172 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x0100);
3173 rtl_writephy(tp
, 0x1f, 0x0002);
3174 rtl_writephy(tp
, 0x1f, 0x0000);
3175 rtl_writephy(tp
, 0x0d, 0x0007);
3176 rtl_writephy(tp
, 0x0e, 0x003c);
3177 rtl_writephy(tp
, 0x0d, 0x4007);
3178 rtl_writephy(tp
, 0x0e, 0x0000);
3179 rtl_writephy(tp
, 0x0d, 0x0000);
3182 rtl_writephy(tp
, 0x1f, 0x0003);
3183 rtl_w1w0_phy(tp
, 0x19, 0x0000, 0x0001);
3184 rtl_w1w0_phy(tp
, 0x10, 0x0000, 0x0400);
3185 rtl_writephy(tp
, 0x1f, 0x0000);
/*
 * PHY tuning common to the RTL8168f family. Callers apply their own
 * firmware/register batches first, then share this sequence.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3210 static void rtl8168f_1_hw_phy_config(struct rtl8169_private
*tp
)
3212 static const struct phy_reg phy_reg_init
[] = {
3213 /* Channel estimation fine tune */
3218 /* Modify green table for giga & fnet */
3235 /* Modify green table for 10M */
3241 /* Disable hiimpedance detection (RTCT) */
3247 rtl_apply_firmware(tp
);
3249 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3251 rtl8168f_hw_phy_config(tp
);
3253 /* Improve 2-pair detection performance */
3254 rtl_writephy(tp
, 0x1f, 0x0005);
3255 rtl_writephy(tp
, 0x05, 0x8b85);
3256 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3257 rtl_writephy(tp
, 0x1f, 0x0000);
/* RTL8168f rev 2: load the firmware patch, then the common 8168f tuning. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3267 static void rtl8411_hw_phy_config(struct rtl8169_private
*tp
)
3269 static const struct phy_reg phy_reg_init
[] = {
3270 /* Channel estimation fine tune */
3275 /* Modify green table for giga & fnet */
3292 /* Modify green table for 10M */
3298 /* Disable hiimpedance detection (RTCT) */
3305 rtl_apply_firmware(tp
);
3307 rtl8168f_hw_phy_config(tp
);
3309 /* Improve 2-pair detection performance */
3310 rtl_writephy(tp
, 0x1f, 0x0005);
3311 rtl_writephy(tp
, 0x05, 0x8b85);
3312 rtl_w1w0_phy(tp
, 0x06, 0x4000, 0x0000);
3313 rtl_writephy(tp
, 0x1f, 0x0000);
3315 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3317 /* Modify green table for giga */
3318 rtl_writephy(tp
, 0x1f, 0x0005);
3319 rtl_writephy(tp
, 0x05, 0x8b54);
3320 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0800);
3321 rtl_writephy(tp
, 0x05, 0x8b5d);
3322 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0800);
3323 rtl_writephy(tp
, 0x05, 0x8a7c);
3324 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3325 rtl_writephy(tp
, 0x05, 0x8a7f);
3326 rtl_w1w0_phy(tp
, 0x06, 0x0100, 0x0000);
3327 rtl_writephy(tp
, 0x05, 0x8a82);
3328 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3329 rtl_writephy(tp
, 0x05, 0x8a85);
3330 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3331 rtl_writephy(tp
, 0x05, 0x8a88);
3332 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x0100);
3333 rtl_writephy(tp
, 0x1f, 0x0000);
3335 /* uc same-seed solution */
3336 rtl_writephy(tp
, 0x1f, 0x0005);
3337 rtl_writephy(tp
, 0x05, 0x8b85);
3338 rtl_w1w0_phy(tp
, 0x06, 0x8000, 0x0000);
3339 rtl_writephy(tp
, 0x1f, 0x0000);
3342 rtl_w1w0_eri(tp
, 0x1b0, ERIAR_MASK_0001
, 0x00, 0x03, ERIAR_EXGMAC
);
3343 rtl_writephy(tp
, 0x1f, 0x0005);
3344 rtl_writephy(tp
, 0x05, 0x8b85);
3345 rtl_w1w0_phy(tp
, 0x06, 0x0000, 0x2000);
3346 rtl_writephy(tp
, 0x1f, 0x0004);
3347 rtl_writephy(tp
, 0x1f, 0x0007);
3348 rtl_writephy(tp
, 0x1e, 0x0020);
3349 rtl_w1w0_phy(tp
, 0x15, 0x0000, 0x0100);
3350 rtl_writephy(tp
, 0x1f, 0x0000);
3351 rtl_writephy(tp
, 0x0d, 0x0007);
3352 rtl_writephy(tp
, 0x0e, 0x003c);
3353 rtl_writephy(tp
, 0x0d, 0x4007);
3354 rtl_writephy(tp
, 0x0e, 0x0000);
3355 rtl_writephy(tp
, 0x0d, 0x0000);
3358 rtl_writephy(tp
, 0x1f, 0x0003);
3359 rtl_w1w0_phy(tp
, 0x19, 0x0000, 0x0001);
3360 rtl_w1w0_phy(tp
, 0x10, 0x0000, 0x0400);
3361 rtl_writephy(tp
, 0x1f, 0x0000);
3364 static void rtl8168g_1_hw_phy_config(struct rtl8169_private
*tp
)
3366 static const u16 mac_ocp_patch
[] = {
3367 0xe008, 0xe01b, 0xe01d, 0xe01f,
3368 0xe021, 0xe023, 0xe025, 0xe027,
3369 0x49d2, 0xf10d, 0x766c, 0x49e2,
3370 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3372 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3373 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3374 0xbe00, 0xb416, 0x0076, 0xe86c,
3375 0xc602, 0xbe00, 0x0000, 0xc602,
3377 0xbe00, 0x0000, 0xc602, 0xbe00,
3378 0x0000, 0xc602, 0xbe00, 0x0000,
3379 0xc602, 0xbe00, 0x0000, 0xc602,
3380 0xbe00, 0x0000, 0xc602, 0xbe00,
3382 0x0000, 0x0000, 0x0000, 0x0000
3386 /* Patch code for GPHY reset */
3387 for (i
= 0; i
< ARRAY_SIZE(mac_ocp_patch
); i
++)
3388 r8168_mac_ocp_write(tp
, 0xf800 + 2*i
, mac_ocp_patch
[i
]);
3389 r8168_mac_ocp_write(tp
, 0xfc26, 0x8000);
3390 r8168_mac_ocp_write(tp
, 0xfc28, 0x0075);
3392 rtl_apply_firmware(tp
);
3394 if (r8168_phy_ocp_read(tp
, 0xa460) & 0x0100)
3395 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x0000, 0x8000);
3397 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x8000, 0x0000);
3399 if (r8168_phy_ocp_read(tp
, 0xa466) & 0x0100)
3400 rtl_w1w0_phy_ocp(tp
, 0xc41a, 0x0002, 0x0000);
3402 rtl_w1w0_phy_ocp(tp
, 0xbcc4, 0x0000, 0x0002);
3404 rtl_w1w0_phy_ocp(tp
, 0xa442, 0x000c, 0x0000);
3405 rtl_w1w0_phy_ocp(tp
, 0xa4b2, 0x0004, 0x0000);
3407 r8168_phy_ocp_write(tp
, 0xa436, 0x8012);
3408 rtl_w1w0_phy_ocp(tp
, 0xa438, 0x8000, 0x0000);
3410 rtl_w1w0_phy_ocp(tp
, 0xc422, 0x4000, 0x2000);
3413 static void rtl8102e_hw_phy_config(struct rtl8169_private
*tp
)
3415 static const struct phy_reg phy_reg_init
[] = {
3422 rtl_writephy(tp
, 0x1f, 0x0000);
3423 rtl_patchphy(tp
, 0x11, 1 << 12);
3424 rtl_patchphy(tp
, 0x19, 1 << 13);
3425 rtl_patchphy(tp
, 0x10, 1 << 15);
3427 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3430 static void rtl8105e_hw_phy_config(struct rtl8169_private
*tp
)
3432 static const struct phy_reg phy_reg_init
[] = {
3446 /* Disable ALDPS before ram code */
3447 rtl_writephy(tp
, 0x1f, 0x0000);
3448 rtl_writephy(tp
, 0x18, 0x0310);
3451 rtl_apply_firmware(tp
);
3453 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3456 static void rtl8402_hw_phy_config(struct rtl8169_private
*tp
)
3458 /* Disable ALDPS before setting firmware */
3459 rtl_writephy(tp
, 0x1f, 0x0000);
3460 rtl_writephy(tp
, 0x18, 0x0310);
3463 rtl_apply_firmware(tp
);
3466 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3467 rtl_writephy(tp
, 0x1f, 0x0004);
3468 rtl_writephy(tp
, 0x10, 0x401f);
3469 rtl_writephy(tp
, 0x19, 0x7030);
3470 rtl_writephy(tp
, 0x1f, 0x0000);
3473 static void rtl8106e_hw_phy_config(struct rtl8169_private
*tp
)
3475 static const struct phy_reg phy_reg_init
[] = {
3482 /* Disable ALDPS before ram code */
3483 rtl_writephy(tp
, 0x1f, 0x0000);
3484 rtl_writephy(tp
, 0x18, 0x0310);
3487 rtl_apply_firmware(tp
);
3489 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3490 rtl_writephy_batch(tp
, phy_reg_init
, ARRAY_SIZE(phy_reg_init
));
3492 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000, ERIAR_EXGMAC
);
3495 static void rtl_hw_phy_config(struct net_device
*dev
)
3497 struct rtl8169_private
*tp
= netdev_priv(dev
);
3499 rtl8169_print_mac_version(tp
);
3501 switch (tp
->mac_version
) {
3502 case RTL_GIGA_MAC_VER_01
:
3504 case RTL_GIGA_MAC_VER_02
:
3505 case RTL_GIGA_MAC_VER_03
:
3506 rtl8169s_hw_phy_config(tp
);
3508 case RTL_GIGA_MAC_VER_04
:
3509 rtl8169sb_hw_phy_config(tp
);
3511 case RTL_GIGA_MAC_VER_05
:
3512 rtl8169scd_hw_phy_config(tp
);
3514 case RTL_GIGA_MAC_VER_06
:
3515 rtl8169sce_hw_phy_config(tp
);
3517 case RTL_GIGA_MAC_VER_07
:
3518 case RTL_GIGA_MAC_VER_08
:
3519 case RTL_GIGA_MAC_VER_09
:
3520 rtl8102e_hw_phy_config(tp
);
3522 case RTL_GIGA_MAC_VER_11
:
3523 rtl8168bb_hw_phy_config(tp
);
3525 case RTL_GIGA_MAC_VER_12
:
3526 rtl8168bef_hw_phy_config(tp
);
3528 case RTL_GIGA_MAC_VER_17
:
3529 rtl8168bef_hw_phy_config(tp
);
3531 case RTL_GIGA_MAC_VER_18
:
3532 rtl8168cp_1_hw_phy_config(tp
);
3534 case RTL_GIGA_MAC_VER_19
:
3535 rtl8168c_1_hw_phy_config(tp
);
3537 case RTL_GIGA_MAC_VER_20
:
3538 rtl8168c_2_hw_phy_config(tp
);
3540 case RTL_GIGA_MAC_VER_21
:
3541 rtl8168c_3_hw_phy_config(tp
);
3543 case RTL_GIGA_MAC_VER_22
:
3544 rtl8168c_4_hw_phy_config(tp
);
3546 case RTL_GIGA_MAC_VER_23
:
3547 case RTL_GIGA_MAC_VER_24
:
3548 rtl8168cp_2_hw_phy_config(tp
);
3550 case RTL_GIGA_MAC_VER_25
:
3551 rtl8168d_1_hw_phy_config(tp
);
3553 case RTL_GIGA_MAC_VER_26
:
3554 rtl8168d_2_hw_phy_config(tp
);
3556 case RTL_GIGA_MAC_VER_27
:
3557 rtl8168d_3_hw_phy_config(tp
);
3559 case RTL_GIGA_MAC_VER_28
:
3560 rtl8168d_4_hw_phy_config(tp
);
3562 case RTL_GIGA_MAC_VER_29
:
3563 case RTL_GIGA_MAC_VER_30
:
3564 rtl8105e_hw_phy_config(tp
);
3566 case RTL_GIGA_MAC_VER_31
:
3569 case RTL_GIGA_MAC_VER_32
:
3570 case RTL_GIGA_MAC_VER_33
:
3571 rtl8168e_1_hw_phy_config(tp
);
3573 case RTL_GIGA_MAC_VER_34
:
3574 rtl8168e_2_hw_phy_config(tp
);
3576 case RTL_GIGA_MAC_VER_35
:
3577 rtl8168f_1_hw_phy_config(tp
);
3579 case RTL_GIGA_MAC_VER_36
:
3580 rtl8168f_2_hw_phy_config(tp
);
3583 case RTL_GIGA_MAC_VER_37
:
3584 rtl8402_hw_phy_config(tp
);
3587 case RTL_GIGA_MAC_VER_38
:
3588 rtl8411_hw_phy_config(tp
);
3591 case RTL_GIGA_MAC_VER_39
:
3592 rtl8106e_hw_phy_config(tp
);
3595 case RTL_GIGA_MAC_VER_40
:
3596 rtl8168g_1_hw_phy_config(tp
);
3599 case RTL_GIGA_MAC_VER_41
:
3605 static void rtl_phy_work(struct rtl8169_private
*tp
)
3607 struct timer_list
*timer
= &tp
->timer
;
3608 void __iomem
*ioaddr
= tp
->mmio_addr
;
3609 unsigned long timeout
= RTL8169_PHY_TIMEOUT
;
3611 assert(tp
->mac_version
> RTL_GIGA_MAC_VER_01
);
3613 if (tp
->phy_reset_pending(tp
)) {
3615 * A busy loop could burn quite a few cycles on nowadays CPU.
3616 * Let's delay the execution of the timer for a few ticks.
3622 if (tp
->link_ok(ioaddr
))
3625 netif_warn(tp
, link
, tp
->dev
, "PHY reset until link up\n");
3627 tp
->phy_reset_enable(tp
);
3630 mod_timer(timer
, jiffies
+ timeout
);
3633 static void rtl_schedule_task(struct rtl8169_private
*tp
, enum rtl_flag flag
)
3635 if (!test_and_set_bit(flag
, tp
->wk
.flags
))
3636 schedule_work(&tp
->wk
.work
);
3639 static void rtl8169_phy_timer(unsigned long __opaque
)
3641 struct net_device
*dev
= (struct net_device
*)__opaque
;
3642 struct rtl8169_private
*tp
= netdev_priv(dev
);
3644 rtl_schedule_task(tp
, RTL_FLAG_TASK_PHY_PENDING
);
3647 static void rtl8169_release_board(struct pci_dev
*pdev
, struct net_device
*dev
,
3648 void __iomem
*ioaddr
)
3651 pci_release_regions(pdev
);
3652 pci_clear_mwi(pdev
);
3653 pci_disable_device(pdev
);
3657 DECLARE_RTL_COND(rtl_phy_reset_cond
)
3659 return tp
->phy_reset_pending(tp
);
3662 static void rtl8169_phy_reset(struct net_device
*dev
,
3663 struct rtl8169_private
*tp
)
3665 tp
->phy_reset_enable(tp
);
3666 rtl_msleep_loop_wait_low(tp
, &rtl_phy_reset_cond
, 1, 100);
3669 static bool rtl_tbi_enabled(struct rtl8169_private
*tp
)
3671 void __iomem
*ioaddr
= tp
->mmio_addr
;
3673 return (tp
->mac_version
== RTL_GIGA_MAC_VER_01
) &&
3674 (RTL_R8(PHYstatus
) & TBI_Enable
);
3677 static void rtl8169_init_phy(struct net_device
*dev
, struct rtl8169_private
*tp
)
3679 void __iomem
*ioaddr
= tp
->mmio_addr
;
3681 rtl_hw_phy_config(dev
);
3683 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
) {
3684 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3688 pci_write_config_byte(tp
->pci_dev
, PCI_LATENCY_TIMER
, 0x40);
3690 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
)
3691 pci_write_config_byte(tp
->pci_dev
, PCI_CACHE_LINE_SIZE
, 0x08);
3693 if (tp
->mac_version
== RTL_GIGA_MAC_VER_02
) {
3694 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3696 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3697 rtl_writephy(tp
, 0x0b, 0x0000); //w 0x0b 15 0 0
3700 rtl8169_phy_reset(dev
, tp
);
3702 rtl8169_set_speed(dev
, AUTONEG_ENABLE
, SPEED_1000
, DUPLEX_FULL
,
3703 ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
3704 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
3705 (tp
->mii
.supports_gmii
?
3706 ADVERTISED_1000baseT_Half
|
3707 ADVERTISED_1000baseT_Full
: 0));
3709 if (rtl_tbi_enabled(tp
))
3710 netif_info(tp
, link
, dev
, "TBI auto-negotiating\n");
3713 static void rtl_rar_set(struct rtl8169_private
*tp
, u8
*addr
)
3715 void __iomem
*ioaddr
= tp
->mmio_addr
;
3719 low
= addr
[0] | (addr
[1] << 8) | (addr
[2] << 16) | (addr
[3] << 24);
3720 high
= addr
[4] | (addr
[5] << 8);
3724 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
3726 RTL_W32(MAC4
, high
);
3732 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
) {
3733 const struct exgmac_reg e
[] = {
3734 { .addr
= 0xe0, ERIAR_MASK_1111
, .val
= low
},
3735 { .addr
= 0xe4, ERIAR_MASK_1111
, .val
= high
},
3736 { .addr
= 0xf0, ERIAR_MASK_1111
, .val
= low
<< 16 },
3737 { .addr
= 0xf4, ERIAR_MASK_1111
, .val
= high
<< 16 |
3741 rtl_write_exgmac_batch(tp
, e
, ARRAY_SIZE(e
));
3744 RTL_W8(Cfg9346
, Cfg9346_Lock
);
3746 rtl_unlock_work(tp
);
3749 static int rtl_set_mac_address(struct net_device
*dev
, void *p
)
3751 struct rtl8169_private
*tp
= netdev_priv(dev
);
3752 struct sockaddr
*addr
= p
;
3754 if (!is_valid_ether_addr(addr
->sa_data
))
3755 return -EADDRNOTAVAIL
;
3757 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
3759 rtl_rar_set(tp
, dev
->dev_addr
);
3764 static int rtl8169_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
3766 struct rtl8169_private
*tp
= netdev_priv(dev
);
3767 struct mii_ioctl_data
*data
= if_mii(ifr
);
3769 return netif_running(dev
) ? tp
->do_ioctl(tp
, data
, cmd
) : -ENODEV
;
3772 static int rtl_xmii_ioctl(struct rtl8169_private
*tp
,
3773 struct mii_ioctl_data
*data
, int cmd
)
3777 data
->phy_id
= 32; /* Internal PHY */
3781 data
->val_out
= rtl_readphy(tp
, data
->reg_num
& 0x1f);
3785 rtl_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
3791 static int rtl_tbi_ioctl(struct rtl8169_private
*tp
, struct mii_ioctl_data
*data
, int cmd
)
3796 static void rtl_disable_msi(struct pci_dev
*pdev
, struct rtl8169_private
*tp
)
3798 if (tp
->features
& RTL_FEATURE_MSI
) {
3799 pci_disable_msi(pdev
);
3800 tp
->features
&= ~RTL_FEATURE_MSI
;
3804 static void __devinit
rtl_init_mdio_ops(struct rtl8169_private
*tp
)
3806 struct mdio_ops
*ops
= &tp
->mdio_ops
;
3808 switch (tp
->mac_version
) {
3809 case RTL_GIGA_MAC_VER_27
:
3810 ops
->write
= r8168dp_1_mdio_write
;
3811 ops
->read
= r8168dp_1_mdio_read
;
3813 case RTL_GIGA_MAC_VER_28
:
3814 case RTL_GIGA_MAC_VER_31
:
3815 ops
->write
= r8168dp_2_mdio_write
;
3816 ops
->read
= r8168dp_2_mdio_read
;
3818 case RTL_GIGA_MAC_VER_40
:
3819 case RTL_GIGA_MAC_VER_41
:
3820 ops
->write
= r8168g_mdio_write
;
3821 ops
->read
= r8168g_mdio_read
;
3824 ops
->write
= r8169_mdio_write
;
3825 ops
->read
= r8169_mdio_read
;
3830 static void rtl_wol_suspend_quirk(struct rtl8169_private
*tp
)
3832 void __iomem
*ioaddr
= tp
->mmio_addr
;
3834 switch (tp
->mac_version
) {
3835 case RTL_GIGA_MAC_VER_29
:
3836 case RTL_GIGA_MAC_VER_30
:
3837 case RTL_GIGA_MAC_VER_32
:
3838 case RTL_GIGA_MAC_VER_33
:
3839 case RTL_GIGA_MAC_VER_34
:
3840 case RTL_GIGA_MAC_VER_37
:
3841 case RTL_GIGA_MAC_VER_38
:
3842 case RTL_GIGA_MAC_VER_39
:
3843 case RTL_GIGA_MAC_VER_40
:
3844 case RTL_GIGA_MAC_VER_41
:
3845 RTL_W32(RxConfig
, RTL_R32(RxConfig
) |
3846 AcceptBroadcast
| AcceptMulticast
| AcceptMyPhys
);
3853 static bool rtl_wol_pll_power_down(struct rtl8169_private
*tp
)
3855 if (!(__rtl8169_get_wol(tp
) & WAKE_ANY
))
3858 rtl_writephy(tp
, 0x1f, 0x0000);
3859 rtl_writephy(tp
, MII_BMCR
, 0x0000);
3861 rtl_wol_suspend_quirk(tp
);
3866 static void r810x_phy_power_down(struct rtl8169_private
*tp
)
3868 rtl_writephy(tp
, 0x1f, 0x0000);
3869 rtl_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
3872 static void r810x_phy_power_up(struct rtl8169_private
*tp
)
3874 rtl_writephy(tp
, 0x1f, 0x0000);
3875 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
);
3878 static void r810x_pll_power_down(struct rtl8169_private
*tp
)
3880 void __iomem
*ioaddr
= tp
->mmio_addr
;
3882 if (rtl_wol_pll_power_down(tp
))
3885 r810x_phy_power_down(tp
);
3887 switch (tp
->mac_version
) {
3888 case RTL_GIGA_MAC_VER_07
:
3889 case RTL_GIGA_MAC_VER_08
:
3890 case RTL_GIGA_MAC_VER_09
:
3891 case RTL_GIGA_MAC_VER_10
:
3892 case RTL_GIGA_MAC_VER_13
:
3893 case RTL_GIGA_MAC_VER_16
:
3896 RTL_W8(PMCH
, RTL_R8(PMCH
) & ~0x80);
3901 static void r810x_pll_power_up(struct rtl8169_private
*tp
)
3903 void __iomem
*ioaddr
= tp
->mmio_addr
;
3905 r810x_phy_power_up(tp
);
3907 switch (tp
->mac_version
) {
3908 case RTL_GIGA_MAC_VER_07
:
3909 case RTL_GIGA_MAC_VER_08
:
3910 case RTL_GIGA_MAC_VER_09
:
3911 case RTL_GIGA_MAC_VER_10
:
3912 case RTL_GIGA_MAC_VER_13
:
3913 case RTL_GIGA_MAC_VER_16
:
3916 RTL_W8(PMCH
, RTL_R8(PMCH
) | 0x80);
3921 static void r8168_phy_power_up(struct rtl8169_private
*tp
)
3923 rtl_writephy(tp
, 0x1f, 0x0000);
3924 switch (tp
->mac_version
) {
3925 case RTL_GIGA_MAC_VER_11
:
3926 case RTL_GIGA_MAC_VER_12
:
3927 case RTL_GIGA_MAC_VER_17
:
3928 case RTL_GIGA_MAC_VER_18
:
3929 case RTL_GIGA_MAC_VER_19
:
3930 case RTL_GIGA_MAC_VER_20
:
3931 case RTL_GIGA_MAC_VER_21
:
3932 case RTL_GIGA_MAC_VER_22
:
3933 case RTL_GIGA_MAC_VER_23
:
3934 case RTL_GIGA_MAC_VER_24
:
3935 case RTL_GIGA_MAC_VER_25
:
3936 case RTL_GIGA_MAC_VER_26
:
3937 case RTL_GIGA_MAC_VER_27
:
3938 case RTL_GIGA_MAC_VER_28
:
3939 case RTL_GIGA_MAC_VER_31
:
3940 rtl_writephy(tp
, 0x0e, 0x0000);
3945 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
);
3948 static void r8168_phy_power_down(struct rtl8169_private
*tp
)
3950 rtl_writephy(tp
, 0x1f, 0x0000);
3951 switch (tp
->mac_version
) {
3952 case RTL_GIGA_MAC_VER_32
:
3953 case RTL_GIGA_MAC_VER_33
:
3954 rtl_writephy(tp
, MII_BMCR
, BMCR_ANENABLE
| BMCR_PDOWN
);
3957 case RTL_GIGA_MAC_VER_11
:
3958 case RTL_GIGA_MAC_VER_12
:
3959 case RTL_GIGA_MAC_VER_17
:
3960 case RTL_GIGA_MAC_VER_18
:
3961 case RTL_GIGA_MAC_VER_19
:
3962 case RTL_GIGA_MAC_VER_20
:
3963 case RTL_GIGA_MAC_VER_21
:
3964 case RTL_GIGA_MAC_VER_22
:
3965 case RTL_GIGA_MAC_VER_23
:
3966 case RTL_GIGA_MAC_VER_24
:
3967 case RTL_GIGA_MAC_VER_25
:
3968 case RTL_GIGA_MAC_VER_26
:
3969 case RTL_GIGA_MAC_VER_27
:
3970 case RTL_GIGA_MAC_VER_28
:
3971 case RTL_GIGA_MAC_VER_31
:
3972 rtl_writephy(tp
, 0x0e, 0x0200);
3974 rtl_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
3979 static void r8168_pll_power_down(struct rtl8169_private
*tp
)
3981 void __iomem
*ioaddr
= tp
->mmio_addr
;
3983 if ((tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
3984 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
3985 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) &&
3986 r8168dp_check_dash(tp
)) {
3990 if ((tp
->mac_version
== RTL_GIGA_MAC_VER_23
||
3991 tp
->mac_version
== RTL_GIGA_MAC_VER_24
) &&
3992 (RTL_R16(CPlusCmd
) & ASF
)) {
3996 if (tp
->mac_version
== RTL_GIGA_MAC_VER_32
||
3997 tp
->mac_version
== RTL_GIGA_MAC_VER_33
)
3998 rtl_ephy_write(tp
, 0x19, 0xff64);
4000 if (rtl_wol_pll_power_down(tp
))
4003 r8168_phy_power_down(tp
);
4005 switch (tp
->mac_version
) {
4006 case RTL_GIGA_MAC_VER_25
:
4007 case RTL_GIGA_MAC_VER_26
:
4008 case RTL_GIGA_MAC_VER_27
:
4009 case RTL_GIGA_MAC_VER_28
:
4010 case RTL_GIGA_MAC_VER_31
:
4011 case RTL_GIGA_MAC_VER_32
:
4012 case RTL_GIGA_MAC_VER_33
:
4013 RTL_W8(PMCH
, RTL_R8(PMCH
) & ~0x80);
4018 static void r8168_pll_power_up(struct rtl8169_private
*tp
)
4020 void __iomem
*ioaddr
= tp
->mmio_addr
;
4022 switch (tp
->mac_version
) {
4023 case RTL_GIGA_MAC_VER_25
:
4024 case RTL_GIGA_MAC_VER_26
:
4025 case RTL_GIGA_MAC_VER_27
:
4026 case RTL_GIGA_MAC_VER_28
:
4027 case RTL_GIGA_MAC_VER_31
:
4028 case RTL_GIGA_MAC_VER_32
:
4029 case RTL_GIGA_MAC_VER_33
:
4030 RTL_W8(PMCH
, RTL_R8(PMCH
) | 0x80);
4034 r8168_phy_power_up(tp
);
4037 static void rtl_generic_op(struct rtl8169_private
*tp
,
4038 void (*op
)(struct rtl8169_private
*))
4044 static void rtl_pll_power_down(struct rtl8169_private
*tp
)
4046 rtl_generic_op(tp
, tp
->pll_power_ops
.down
);
4049 static void rtl_pll_power_up(struct rtl8169_private
*tp
)
4051 rtl_generic_op(tp
, tp
->pll_power_ops
.up
);
4054 static void __devinit
rtl_init_pll_power_ops(struct rtl8169_private
*tp
)
4056 struct pll_power_ops
*ops
= &tp
->pll_power_ops
;
4058 switch (tp
->mac_version
) {
4059 case RTL_GIGA_MAC_VER_07
:
4060 case RTL_GIGA_MAC_VER_08
:
4061 case RTL_GIGA_MAC_VER_09
:
4062 case RTL_GIGA_MAC_VER_10
:
4063 case RTL_GIGA_MAC_VER_16
:
4064 case RTL_GIGA_MAC_VER_29
:
4065 case RTL_GIGA_MAC_VER_30
:
4066 case RTL_GIGA_MAC_VER_37
:
4067 case RTL_GIGA_MAC_VER_39
:
4068 ops
->down
= r810x_pll_power_down
;
4069 ops
->up
= r810x_pll_power_up
;
4072 case RTL_GIGA_MAC_VER_11
:
4073 case RTL_GIGA_MAC_VER_12
:
4074 case RTL_GIGA_MAC_VER_17
:
4075 case RTL_GIGA_MAC_VER_18
:
4076 case RTL_GIGA_MAC_VER_19
:
4077 case RTL_GIGA_MAC_VER_20
:
4078 case RTL_GIGA_MAC_VER_21
:
4079 case RTL_GIGA_MAC_VER_22
:
4080 case RTL_GIGA_MAC_VER_23
:
4081 case RTL_GIGA_MAC_VER_24
:
4082 case RTL_GIGA_MAC_VER_25
:
4083 case RTL_GIGA_MAC_VER_26
:
4084 case RTL_GIGA_MAC_VER_27
:
4085 case RTL_GIGA_MAC_VER_28
:
4086 case RTL_GIGA_MAC_VER_31
:
4087 case RTL_GIGA_MAC_VER_32
:
4088 case RTL_GIGA_MAC_VER_33
:
4089 case RTL_GIGA_MAC_VER_34
:
4090 case RTL_GIGA_MAC_VER_35
:
4091 case RTL_GIGA_MAC_VER_36
:
4092 case RTL_GIGA_MAC_VER_38
:
4093 case RTL_GIGA_MAC_VER_40
:
4094 case RTL_GIGA_MAC_VER_41
:
4095 ops
->down
= r8168_pll_power_down
;
4096 ops
->up
= r8168_pll_power_up
;
4106 static void rtl_init_rxcfg(struct rtl8169_private
*tp
)
4108 void __iomem
*ioaddr
= tp
->mmio_addr
;
4110 switch (tp
->mac_version
) {
4111 case RTL_GIGA_MAC_VER_01
:
4112 case RTL_GIGA_MAC_VER_02
:
4113 case RTL_GIGA_MAC_VER_03
:
4114 case RTL_GIGA_MAC_VER_04
:
4115 case RTL_GIGA_MAC_VER_05
:
4116 case RTL_GIGA_MAC_VER_06
:
4117 case RTL_GIGA_MAC_VER_10
:
4118 case RTL_GIGA_MAC_VER_11
:
4119 case RTL_GIGA_MAC_VER_12
:
4120 case RTL_GIGA_MAC_VER_13
:
4121 case RTL_GIGA_MAC_VER_14
:
4122 case RTL_GIGA_MAC_VER_15
:
4123 case RTL_GIGA_MAC_VER_16
:
4124 case RTL_GIGA_MAC_VER_17
:
4125 RTL_W32(RxConfig
, RX_FIFO_THRESH
| RX_DMA_BURST
);
4127 case RTL_GIGA_MAC_VER_18
:
4128 case RTL_GIGA_MAC_VER_19
:
4129 case RTL_GIGA_MAC_VER_20
:
4130 case RTL_GIGA_MAC_VER_21
:
4131 case RTL_GIGA_MAC_VER_22
:
4132 case RTL_GIGA_MAC_VER_23
:
4133 case RTL_GIGA_MAC_VER_24
:
4134 case RTL_GIGA_MAC_VER_34
:
4135 RTL_W32(RxConfig
, RX128_INT_EN
| RX_MULTI_EN
| RX_DMA_BURST
);
4138 RTL_W32(RxConfig
, RX128_INT_EN
| RX_DMA_BURST
);
4143 static void rtl8169_init_ring_indexes(struct rtl8169_private
*tp
)
4145 tp
->dirty_tx
= tp
->dirty_rx
= tp
->cur_tx
= tp
->cur_rx
= 0;
4148 static void rtl_hw_jumbo_enable(struct rtl8169_private
*tp
)
4150 void __iomem
*ioaddr
= tp
->mmio_addr
;
4152 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
4153 rtl_generic_op(tp
, tp
->jumbo_ops
.enable
);
4154 RTL_W8(Cfg9346
, Cfg9346_Lock
);
4157 static void rtl_hw_jumbo_disable(struct rtl8169_private
*tp
)
4159 void __iomem
*ioaddr
= tp
->mmio_addr
;
4161 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
4162 rtl_generic_op(tp
, tp
->jumbo_ops
.disable
);
4163 RTL_W8(Cfg9346
, Cfg9346_Lock
);
4166 static void r8168c_hw_jumbo_enable(struct rtl8169_private
*tp
)
4168 void __iomem
*ioaddr
= tp
->mmio_addr
;
4170 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4171 RTL_W8(Config4
, RTL_R8(Config4
) | Jumbo_En1
);
4172 rtl_tx_performance_tweak(tp
->pci_dev
, 0x2 << MAX_READ_REQUEST_SHIFT
);
4175 static void r8168c_hw_jumbo_disable(struct rtl8169_private
*tp
)
4177 void __iomem
*ioaddr
= tp
->mmio_addr
;
4179 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4180 RTL_W8(Config4
, RTL_R8(Config4
) & ~Jumbo_En1
);
4181 rtl_tx_performance_tweak(tp
->pci_dev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4184 static void r8168dp_hw_jumbo_enable(struct rtl8169_private
*tp
)
4186 void __iomem
*ioaddr
= tp
->mmio_addr
;
4188 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4191 static void r8168dp_hw_jumbo_disable(struct rtl8169_private
*tp
)
4193 void __iomem
*ioaddr
= tp
->mmio_addr
;
4195 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4198 static void r8168e_hw_jumbo_enable(struct rtl8169_private
*tp
)
4200 void __iomem
*ioaddr
= tp
->mmio_addr
;
4202 RTL_W8(MaxTxPacketSize
, 0x3f);
4203 RTL_W8(Config3
, RTL_R8(Config3
) | Jumbo_En0
);
4204 RTL_W8(Config4
, RTL_R8(Config4
) | 0x01);
4205 rtl_tx_performance_tweak(tp
->pci_dev
, 0x2 << MAX_READ_REQUEST_SHIFT
);
4208 static void r8168e_hw_jumbo_disable(struct rtl8169_private
*tp
)
4210 void __iomem
*ioaddr
= tp
->mmio_addr
;
4212 RTL_W8(MaxTxPacketSize
, 0x0c);
4213 RTL_W8(Config3
, RTL_R8(Config3
) & ~Jumbo_En0
);
4214 RTL_W8(Config4
, RTL_R8(Config4
) & ~0x01);
4215 rtl_tx_performance_tweak(tp
->pci_dev
, 0x5 << MAX_READ_REQUEST_SHIFT
);
4218 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private
*tp
)
4220 rtl_tx_performance_tweak(tp
->pci_dev
,
4221 (0x2 << MAX_READ_REQUEST_SHIFT
) | PCI_EXP_DEVCTL_NOSNOOP_EN
);
4224 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private
*tp
)
4226 rtl_tx_performance_tweak(tp
->pci_dev
,
4227 (0x5 << MAX_READ_REQUEST_SHIFT
) | PCI_EXP_DEVCTL_NOSNOOP_EN
);
4230 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private
*tp
)
4232 void __iomem
*ioaddr
= tp
->mmio_addr
;
4234 r8168b_0_hw_jumbo_enable(tp
);
4236 RTL_W8(Config4
, RTL_R8(Config4
) | (1 << 0));
4239 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private
*tp
)
4241 void __iomem
*ioaddr
= tp
->mmio_addr
;
4243 r8168b_0_hw_jumbo_disable(tp
);
4245 RTL_W8(Config4
, RTL_R8(Config4
) & ~(1 << 0));
4248 static void __devinit
rtl_init_jumbo_ops(struct rtl8169_private
*tp
)
4250 struct jumbo_ops
*ops
= &tp
->jumbo_ops
;
4252 switch (tp
->mac_version
) {
4253 case RTL_GIGA_MAC_VER_11
:
4254 ops
->disable
= r8168b_0_hw_jumbo_disable
;
4255 ops
->enable
= r8168b_0_hw_jumbo_enable
;
4257 case RTL_GIGA_MAC_VER_12
:
4258 case RTL_GIGA_MAC_VER_17
:
4259 ops
->disable
= r8168b_1_hw_jumbo_disable
;
4260 ops
->enable
= r8168b_1_hw_jumbo_enable
;
4262 case RTL_GIGA_MAC_VER_18
: /* Wild guess. Needs info from Realtek. */
4263 case RTL_GIGA_MAC_VER_19
:
4264 case RTL_GIGA_MAC_VER_20
:
4265 case RTL_GIGA_MAC_VER_21
: /* Wild guess. Needs info from Realtek. */
4266 case RTL_GIGA_MAC_VER_22
:
4267 case RTL_GIGA_MAC_VER_23
:
4268 case RTL_GIGA_MAC_VER_24
:
4269 case RTL_GIGA_MAC_VER_25
:
4270 case RTL_GIGA_MAC_VER_26
:
4271 ops
->disable
= r8168c_hw_jumbo_disable
;
4272 ops
->enable
= r8168c_hw_jumbo_enable
;
4274 case RTL_GIGA_MAC_VER_27
:
4275 case RTL_GIGA_MAC_VER_28
:
4276 ops
->disable
= r8168dp_hw_jumbo_disable
;
4277 ops
->enable
= r8168dp_hw_jumbo_enable
;
4279 case RTL_GIGA_MAC_VER_31
: /* Wild guess. Needs info from Realtek. */
4280 case RTL_GIGA_MAC_VER_32
:
4281 case RTL_GIGA_MAC_VER_33
:
4282 case RTL_GIGA_MAC_VER_34
:
4283 ops
->disable
= r8168e_hw_jumbo_disable
;
4284 ops
->enable
= r8168e_hw_jumbo_enable
;
4288 * No action needed for jumbo frames with 8169.
4289 * No jumbo for 810x at all.
4291 case RTL_GIGA_MAC_VER_40
:
4292 case RTL_GIGA_MAC_VER_41
:
4294 ops
->disable
= NULL
;
4300 DECLARE_RTL_COND(rtl_chipcmd_cond
)
4302 void __iomem
*ioaddr
= tp
->mmio_addr
;
4304 return RTL_R8(ChipCmd
) & CmdReset
;
4307 static void rtl_hw_reset(struct rtl8169_private
*tp
)
4309 void __iomem
*ioaddr
= tp
->mmio_addr
;
4311 RTL_W8(ChipCmd
, CmdReset
);
4313 rtl_udelay_loop_wait_low(tp
, &rtl_chipcmd_cond
, 100, 100);
4316 static void rtl_request_uncached_firmware(struct rtl8169_private
*tp
)
4318 struct rtl_fw
*rtl_fw
;
4322 name
= rtl_lookup_firmware_name(tp
);
4324 goto out_no_firmware
;
4326 rtl_fw
= kzalloc(sizeof(*rtl_fw
), GFP_KERNEL
);
4330 rc
= request_firmware(&rtl_fw
->fw
, name
, &tp
->pci_dev
->dev
);
4334 rc
= rtl_check_firmware(tp
, rtl_fw
);
4336 goto err_release_firmware
;
4338 tp
->rtl_fw
= rtl_fw
;
4342 err_release_firmware
:
4343 release_firmware(rtl_fw
->fw
);
4347 netif_warn(tp
, ifup
, tp
->dev
, "unable to load firmware patch %s (%d)\n",
4354 static void rtl_request_firmware(struct rtl8169_private
*tp
)
4356 if (IS_ERR(tp
->rtl_fw
))
4357 rtl_request_uncached_firmware(tp
);
4360 static void rtl_rx_close(struct rtl8169_private
*tp
)
4362 void __iomem
*ioaddr
= tp
->mmio_addr
;
4364 RTL_W32(RxConfig
, RTL_R32(RxConfig
) & ~RX_CONFIG_ACCEPT_MASK
);
4367 DECLARE_RTL_COND(rtl_npq_cond
)
4369 void __iomem
*ioaddr
= tp
->mmio_addr
;
4371 return RTL_R8(TxPoll
) & NPQ
;
4374 DECLARE_RTL_COND(rtl_txcfg_empty_cond
)
4376 void __iomem
*ioaddr
= tp
->mmio_addr
;
4378 return RTL_R32(TxConfig
) & TXCFG_EMPTY
;
/*
 * Full controller quiesce + reset: mask IRQs, close RX, drain TX using
 * the method appropriate for the chip revision, then soft-reset.
 * NOTE(review): the rtl_rx_close() call, the final else-branch udelay
 * and the trailing rtl_hw_reset() were lost in extraction and restored
 * from context — verify against the original.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_40 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_41 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
/* Program TxConfig: DMA burst size and inter-frame gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
/*
 * Common start entry point: dispatch to the per-family hw_start hook,
 * then unmask interrupts.  NOTE(review): the tp->hw_start(dev) call was
 * lost in extraction and restored from context — verify.
 */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
/* Point the chip at the TX/RX descriptor rings (high halves first). */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
/*
 * Read CPlusCmd and write the same value back (read-modify-write flush),
 * returning the value read.  NOTE(review): the local declaration and
 * return statement were lost in extraction and restored — verify.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
/* Program the RX size filter one byte above the buffer size. */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
/*
 * Write an undocumented per-(chip, PCI clock) tuning value to reg 0x7c.
 * NOTE(review): struct field declarations, locals and the break were
 * lost in extraction and restored from context — verify.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
	static const struct rtl_cfg2_info {
		u32 mac_version;
		u32 clk;
		u32 val;
	} cfg2_info [] = {
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
	};
	const struct rtl_cfg2_info *p = cfg2_info;
	unsigned int i;
	u32 clk;

	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
		if ((p->mac_version == mac_version) && (p->clk == clk)) {
			RTL_W32(0x7c, p->val);
			break;
		}
	}
}
/*
 * Program the RX accept mask and the 64-bit multicast hash filter from
 * the netdev flags and multicast list.  Chips newer than VER_06 expect
 * the hash words byte-swapped.  NOTE(review): the local declarations
 * and the AcceptAllPhys line were lost in extraction and restored —
 * verify.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 CRC bits select one of 64 hash-filter bits. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
/*
 * Hardware bring-up for the original 8169/8110 family.  The VER_01..04
 * chips require TX/RX enable and config writes in a different order than
 * later revisions.  NOTE(review): the rtl_init_rxcfg() call and the
 * RTL_R8(IntrMask) PCI-commit read were lost in extraction and restored
 * from context — verify against the original.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
/* Dispatch a CSI write through the per-chip ops, if installed. */
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	if (tp->csi_ops.write)
		tp->csi_ops.write(tp, addr, value);
}
/* Dispatch a CSI read through the per-chip ops; ~0 if none installed. */
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
/*
 * OR the given bits into the high byte of CSI register 0x070c.
 * NOTE(review): the local declaration was lost in extraction and
 * restored — verify.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
	u32 csi;

	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | bits);
}
/* CSI access enable, variant 1 (high byte 0x17). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
/* CSI access enable, variant 2 (high byte 0x27). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
/* Poll condition: CSI address register completion flag. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
/* Standard CSI write: load data, issue write command, poll completion. */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
/* Standard CSI read: issue address, poll completion, return data or ~0. */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
/*
 * 8402-family CSI write: like r8169_csi_write but addressed to the NIC
 * PCI function.  NOTE(review): the CSIAR_FUNC_NIC continuation line was
 * lost in extraction and restored from the matching read routine —
 * verify.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
/* 8402-family CSI read, addressed to the NIC PCI function. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
/*
 * Select the CSI access routines for this chip: none for the pre-PCIe
 * parts (VER_01..17), the FUNC_NIC variants for 8402/8411, the standard
 * ones otherwise.  NOTE(review): the NULL assignments, break statements
 * and default label were lost in extraction and restored from the
 * switch structure — verify.
 */
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write = NULL;
		ops->read = NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write = r8402_csi_write;
		ops->read = r8402_csi_read;
		break;

	default:
		ops->write = r8169_csi_write;
		ops->read = r8169_csi_read;
		break;
	}
}
/*
 * EPHY patch entry: at 'offset', clear 'mask' then set 'bits'.
 * NOTE(review): the mask/bits fields and parts of rtl_ephy_init (the
 * len parameter, loop header and e++) were lost in extraction and
 * restored from the loop body — verify.
 */
struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};

/* Apply a table of EPHY read-modify-write patches. */
static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
			  int len)
{
	u16 w;

	while (len-- > 0) {
		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
		rtl_ephy_write(tp, e->offset, w);
		e++;
	}
}
/*
 * Clear the PCIe CLKREQ enable bit in Link Control, if the device has a
 * PCIe capability.  NOTE(review): the capability guard and local decl
 * were lost in extraction and restored — verify.
 */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
		ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
	}
}
/*
 * Set the PCIe CLKREQ enable bit in Link Control, if the device has a
 * PCIe capability.  NOTE(review): the capability guard and local decl
 * were lost in extraction and restored — verify.
 */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);

	if (cap) {
		u16 ctl;

		pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
		ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
	}
}
4766 #define R8168_CPCMD_QUIRK_MASK (\
/* Init for 8168B rev. bb: no beacon, CPlusCmd quirk, no-snoop tweak. */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	rtl_tx_performance_tweak(pdev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/* Init for 8168B rev. ef: bb init plus TX size and a Config4 bit clear. */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
/* Shared tail for the 8168C/CP init variants. */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/*
 * Init for 8168CP rev. 1.  NOTE(review): a fifth ephy table entry
 * ({0x07, 0, 0x2000}) appears to have been lost in extraction and was
 * restored from context — verify.
 */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
/* Init for 8168CP rev. 2: no ephy patches, no clock-request change. */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/* Init for 8168CP rev. 3, including an undocumented DBG_REG write. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/* Init for 8168C rev. 1: NAK fixups in DBG_REG plus ephy patches. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
/* Init for 8168C rev. 2: two ephy patches then the common cp tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
/* Rev. 3 uses the same init sequence as rev. 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
/* Rev. 4 needs no ephy patches — CSI enable plus common cp tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
/* Init for the 8168D family (VER_25/26/27). */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/* Init for 8168DP (VER_31). */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
/*
 * Init for 8168D rev. 4 (VER_28), applying its ephy table by hand.
 * NOTE(review): two ephy table entries and the loop locals were lost in
 * extraction and restored from context — verify.  Also suspicious: the
 * write goes to fixed offset 0x03 rather than e->offset, so every table
 * entry's masked value lands on the same register; later kernels replace
 * this loop with rtl_ephy_init() — confirm intent before relying on it.
 */
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168d_4[] = {
		{ 0x0b, ~0,	0x48 },
		{ 0x19, 0x20,	0x50 },
		{ 0x0c, ~0,	0x20 }
	};
	int i;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
		const struct ephy_info *e = e_info_8168d_4 + i;
		u16 w;

		w = rtl_ephy_read(tp, e->offset);
		rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
	}

	rtl_enable_clock_request(pdev);
}
/* Init for 8168E rev. 1 (VER_32/33): large ephy patch set + FIFO reset. */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
/* Init for 8168E rev. 2 (VER_34): ERI tuning, auto FIFO, EEE LED. */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
/* Shared init for the 8168F family (called by 8168f_1 and 8411). */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	/* Toggle bit 0 of ERI 0xdc off then on. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
/* Init for 8168F rev. 1 (VER_35/36). */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
/* Init for 8411 (VER_38): 8168F base with its own ephy table. */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x1e, 0x0000,	0x4000 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
/* Init for 8168G rev. 1 (VER_40/41). */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Toggle bit 0 of ERI 0xdc off then on. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
/*
 * Hardware bring-up for the whole 8168 family: common register setup,
 * then dispatch to the per-revision init routine.  NOTE(review): break
 * statements, the default label and the RTL_R8(IntrMask) PCI-commit read
 * were lost in extraction and restored from the switch structure —
 * verify against the original.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	RTL_R8(IntrMask);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
		       dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5261 #define R810X_CPCMD_QUIRK_MASK (\
/*
 * Init for 8102E rev. 1 (VER_07).  NOTE(review): the trailing ephy
 * table entry ({0x01, 0, 0x6e65}), the 'u8 cfg1' declaration and the
 * RTL_W8(Config1, ...) opener were lost in extraction and restored from
 * context — verify.
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Drop LEDS0 when both LED bits came up set. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
/* Init for 8102E rev. 2 (VER_09). */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
/* Init for 8102E rev. 3 (VER_08): rev. 2 init plus one ephy write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
/*
 * Init for 8105E rev. 1 (VER_29).  NOTE(review): the final ephy table
 * entry ({0x0a, 0, 0x0020}) was lost in extraction and restored from
 * context — verify.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
/* Init for 8105E rev. 2 (VER_30): rev. 1 init plus one ephy bit. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
/*
 * Init for 8402 (VER_37).  NOTE(review): the second ephy table entry
 * ({0x1e, 0, 0x4000}) was lost in extraction and restored from context
 * — verify.
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	/* Toggle bit 0 of ERI 0xdc off then on. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
/* Init for 8106 (VER_39). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
/*
 * Hardware bring-up for the 8101/8102/8105/8106 (fast-ethernet) family.
 * NOTE(review): the 'if (cap)' guard, the switch break statements and
 * the RTL_R8(IntrMask) PCI-commit read were lost in extraction and
 * restored from context — verify against the original.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap)
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_R8(IntrMask);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
/*
 * ndo_change_mtu: validate the requested MTU against the chip's jumbo
 * limit and toggle jumbo mode accordingly.  NOTE(review): the -EINVAL
 * return, else branch, dev->mtu assignment and final return 0 were lost
 * in extraction and restored from context — verify.
 */
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (new_mtu < ETH_ZLEN ||
	    new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
		return -EINVAL;

	if (new_mtu > ETH_DATA_LEN)
		rtl_hw_jumbo_enable(tp);
	else
		rtl_hw_jumbo_disable(tp);

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}
/* Poison an RX descriptor so the NIC can neither own nor DMA into it. */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
/*
 * Unmap and free one RX data buffer, then poison its descriptor.
 * NOTE(review): the DMA direction argument, kfree and NULL-out were
 * lost in extraction and restored from context — verify.
 */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
/* Give a descriptor to the NIC, preserving only its RingEnd bit. */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
/*
 * Install a DMA mapping in a descriptor and hand it to the NIC.
 * NOTE(review): the wmb() between address write and ownership transfer
 * was lost in extraction and restored from context — verify.
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
/* Round a buffer pointer up to the 16-byte alignment the NIC requires. */
static inline void *rtl8169_align(void *data)
{
	return (void *)ALIGN((long)data, 16);
}
/*
 * Allocate a NUMA-local, 16-byte-aligned RX buffer, DMA-map it and
 * install it in 'desc'.  Returns the buffer pointer or NULL on failure.
 * NOTE(review): the local declarations, NULL checks, kfree on
 * realignment, return statements and err_out path were lost in
 * extraction and restored from context — verify.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	if (rtl8169_align(data) != data) {
		/* Over-allocate so the aligned pointer still fits. */
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
/* Release every populated RX buffer in the ring. */
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (tp->Rx_databuff[i]) {
			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
						 tp->RxDescArray + i);
		}
	}
}
/* Flag a descriptor as the last one in the ring. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
/*
 * Populate every empty slot of the RX ring; on allocation failure tear
 * the ring back down and return -ENOMEM.  NOTE(review): the 'continue',
 * failure branch, returns and err_out label were lost in extraction and
 * restored from context — verify.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
/* Reset ring indexes, zero the bookkeeping arrays, refill the RX ring. */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
/*
 * Unmap one TX buffer and scrub its descriptor/bookkeeping.
 * NOTE(review): the descriptor/len clearing after dma_unmap_single was
 * lost in extraction and restored from context — verify.
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
/*
 * Release n TX entries starting at 'start' (modulo ring size),
 * unmapping buffers and dropping any attached skbs.  NOTE(review): the
 * len/skb guards, dev_kfree_skb and skb NULL-out were lost in
 * extraction and restored from context — verify.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
*tp
)
5657 rtl8169_tx_clear_range(tp
, tp
->dirty_tx
, NUM_TX_DESC
);
5658 tp
->cur_tx
= tp
->dirty_tx
= 0;
/*
 * Deferred full reset: quiesce NAPI/queue, reset hardware, recycle both
 * rings, then restart.  NOTE(review): the 'int i' declaration and the
 * rtl_hw_start(dev) call between napi_enable and netif_wake_queue were
 * lost in extraction and restored from context — verify.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Hand every RX descriptor back to the NIC untouched. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5684 static void rtl8169_tx_timeout(struct net_device
*dev
)
5686 struct rtl8169_private
*tp
= netdev_priv(dev
);
5688 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
5691 static int rtl8169_xmit_frags(struct rtl8169_private
*tp
, struct sk_buff
*skb
,
5694 struct skb_shared_info
*info
= skb_shinfo(skb
);
5695 unsigned int cur_frag
, entry
;
5696 struct TxDesc
* uninitialized_var(txd
);
5697 struct device
*d
= &tp
->pci_dev
->dev
;
5700 for (cur_frag
= 0; cur_frag
< info
->nr_frags
; cur_frag
++) {
5701 const skb_frag_t
*frag
= info
->frags
+ cur_frag
;
5706 entry
= (entry
+ 1) % NUM_TX_DESC
;
5708 txd
= tp
->TxDescArray
+ entry
;
5709 len
= skb_frag_size(frag
);
5710 addr
= skb_frag_address(frag
);
5711 mapping
= dma_map_single(d
, addr
, len
, DMA_TO_DEVICE
);
5712 if (unlikely(dma_mapping_error(d
, mapping
))) {
5713 if (net_ratelimit())
5714 netif_err(tp
, drv
, tp
->dev
,
5715 "Failed to map TX fragments DMA!\n");
5719 /* Anti gcc 2.95.3 bugware (sic) */
5720 status
= opts
[0] | len
|
5721 (RingEnd
* !((entry
+ 1) % NUM_TX_DESC
));
5723 txd
->opts1
= cpu_to_le32(status
);
5724 txd
->opts2
= cpu_to_le32(opts
[1]);
5725 txd
->addr
= cpu_to_le64(mapping
);
5727 tp
->tx_skb
[entry
].len
= len
;
5731 tp
->tx_skb
[entry
].skb
= skb
;
5732 txd
->opts1
|= cpu_to_le32(LastFrag
);
5738 rtl8169_tx_clear_range(tp
, tp
->cur_tx
+ 1, cur_frag
);
5742 static inline void rtl8169_tso_csum(struct rtl8169_private
*tp
,
5743 struct sk_buff
*skb
, u32
*opts
)
5745 const struct rtl_tx_desc_info
*info
= tx_desc_info
+ tp
->txd_version
;
5746 u32 mss
= skb_shinfo(skb
)->gso_size
;
5747 int offset
= info
->opts_offset
;
5751 opts
[offset
] |= min(mss
, TD_MSS_MAX
) << info
->mss_shift
;
5752 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5753 const struct iphdr
*ip
= ip_hdr(skb
);
5755 if (ip
->protocol
== IPPROTO_TCP
)
5756 opts
[offset
] |= info
->checksum
.tcp
;
5757 else if (ip
->protocol
== IPPROTO_UDP
)
5758 opts
[offset
] |= info
->checksum
.udp
;
5764 static netdev_tx_t
rtl8169_start_xmit(struct sk_buff
*skb
,
5765 struct net_device
*dev
)
5767 struct rtl8169_private
*tp
= netdev_priv(dev
);
5768 unsigned int entry
= tp
->cur_tx
% NUM_TX_DESC
;
5769 struct TxDesc
*txd
= tp
->TxDescArray
+ entry
;
5770 void __iomem
*ioaddr
= tp
->mmio_addr
;
5771 struct device
*d
= &tp
->pci_dev
->dev
;
5777 if (unlikely(!TX_FRAGS_READY_FOR(tp
, skb_shinfo(skb
)->nr_frags
))) {
5778 netif_err(tp
, drv
, dev
, "BUG! Tx Ring full when queue awake!\n");
5782 if (unlikely(le32_to_cpu(txd
->opts1
) & DescOwn
))
5785 len
= skb_headlen(skb
);
5786 mapping
= dma_map_single(d
, skb
->data
, len
, DMA_TO_DEVICE
);
5787 if (unlikely(dma_mapping_error(d
, mapping
))) {
5788 if (net_ratelimit())
5789 netif_err(tp
, drv
, dev
, "Failed to map TX DMA!\n");
5793 tp
->tx_skb
[entry
].len
= len
;
5794 txd
->addr
= cpu_to_le64(mapping
);
5796 opts
[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp
, skb
));
5799 rtl8169_tso_csum(tp
, skb
, opts
);
5801 frags
= rtl8169_xmit_frags(tp
, skb
, opts
);
5805 opts
[0] |= FirstFrag
;
5807 opts
[0] |= FirstFrag
| LastFrag
;
5808 tp
->tx_skb
[entry
].skb
= skb
;
5811 txd
->opts2
= cpu_to_le32(opts
[1]);
5813 skb_tx_timestamp(skb
);
5817 /* Anti gcc 2.95.3 bugware (sic) */
5818 status
= opts
[0] | len
| (RingEnd
* !((entry
+ 1) % NUM_TX_DESC
));
5819 txd
->opts1
= cpu_to_le32(status
);
5821 tp
->cur_tx
+= frags
+ 1;
5825 RTL_W8(TxPoll
, NPQ
);
5829 if (!TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
)) {
5830 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5831 * not miss a ring update when it notices a stopped queue.
5834 netif_stop_queue(dev
);
5835 /* Sync with rtl_tx:
5836 * - publish queue status and cur_tx ring index (write barrier)
5837 * - refresh dirty_tx ring index (read barrier).
5838 * May the current thread have a pessimistic view of the ring
5839 * status and forget to wake up queue, a racing rtl_tx thread
5843 if (TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
))
5844 netif_wake_queue(dev
);
5847 return NETDEV_TX_OK
;
5850 rtl8169_unmap_tx_skb(d
, tp
->tx_skb
+ entry
, txd
);
5853 dev
->stats
.tx_dropped
++;
5854 return NETDEV_TX_OK
;
5857 netif_stop_queue(dev
);
5858 dev
->stats
.tx_dropped
++;
5859 return NETDEV_TX_BUSY
;
5862 static void rtl8169_pcierr_interrupt(struct net_device
*dev
)
5864 struct rtl8169_private
*tp
= netdev_priv(dev
);
5865 struct pci_dev
*pdev
= tp
->pci_dev
;
5866 u16 pci_status
, pci_cmd
;
5868 pci_read_config_word(pdev
, PCI_COMMAND
, &pci_cmd
);
5869 pci_read_config_word(pdev
, PCI_STATUS
, &pci_status
);
5871 netif_err(tp
, intr
, dev
, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5872 pci_cmd
, pci_status
);
5875 * The recovery sequence below admits a very elaborated explanation:
5876 * - it seems to work;
5877 * - I did not see what else could be done;
5878 * - it makes iop3xx happy.
5880 * Feel free to adjust to your needs.
5882 if (pdev
->broken_parity_status
)
5883 pci_cmd
&= ~PCI_COMMAND_PARITY
;
5885 pci_cmd
|= PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
;
5887 pci_write_config_word(pdev
, PCI_COMMAND
, pci_cmd
);
5889 pci_write_config_word(pdev
, PCI_STATUS
,
5890 pci_status
& (PCI_STATUS_DETECTED_PARITY
|
5891 PCI_STATUS_SIG_SYSTEM_ERROR
| PCI_STATUS_REC_MASTER_ABORT
|
5892 PCI_STATUS_REC_TARGET_ABORT
| PCI_STATUS_SIG_TARGET_ABORT
));
5894 /* The infamous DAC f*ckup only happens at boot time */
5895 if ((tp
->cp_cmd
& PCIDAC
) && !tp
->dirty_rx
&& !tp
->cur_rx
) {
5896 void __iomem
*ioaddr
= tp
->mmio_addr
;
5898 netif_info(tp
, intr
, dev
, "disabling PCI DAC\n");
5899 tp
->cp_cmd
&= ~PCIDAC
;
5900 RTL_W16(CPlusCmd
, tp
->cp_cmd
);
5901 dev
->features
&= ~NETIF_F_HIGHDMA
;
5904 rtl8169_hw_reset(tp
);
5906 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
5909 static void rtl_tx(struct net_device
*dev
, struct rtl8169_private
*tp
)
5911 unsigned int dirty_tx
, tx_left
;
5913 dirty_tx
= tp
->dirty_tx
;
5915 tx_left
= tp
->cur_tx
- dirty_tx
;
5917 while (tx_left
> 0) {
5918 unsigned int entry
= dirty_tx
% NUM_TX_DESC
;
5919 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
5923 status
= le32_to_cpu(tp
->TxDescArray
[entry
].opts1
);
5924 if (status
& DescOwn
)
5927 rtl8169_unmap_tx_skb(&tp
->pci_dev
->dev
, tx_skb
,
5928 tp
->TxDescArray
+ entry
);
5929 if (status
& LastFrag
) {
5930 u64_stats_update_begin(&tp
->tx_stats
.syncp
);
5931 tp
->tx_stats
.packets
++;
5932 tp
->tx_stats
.bytes
+= tx_skb
->skb
->len
;
5933 u64_stats_update_end(&tp
->tx_stats
.syncp
);
5934 dev_kfree_skb(tx_skb
->skb
);
5941 if (tp
->dirty_tx
!= dirty_tx
) {
5942 tp
->dirty_tx
= dirty_tx
;
5943 /* Sync with rtl8169_start_xmit:
5944 * - publish dirty_tx ring index (write barrier)
5945 * - refresh cur_tx ring index and queue status (read barrier)
5946 * May the current thread miss the stopped queue condition,
5947 * a racing xmit thread can only have a right view of the
5951 if (netif_queue_stopped(dev
) &&
5952 TX_FRAGS_READY_FOR(tp
, MAX_SKB_FRAGS
)) {
5953 netif_wake_queue(dev
);
5956 * 8168 hack: TxPoll requests are lost when the Tx packets are
5957 * too close. Let's kick an extra TxPoll request when a burst
5958 * of start_xmit activity is detected (if it is not detected,
5959 * it is slow enough). -- FR
5961 if (tp
->cur_tx
!= dirty_tx
) {
5962 void __iomem
*ioaddr
= tp
->mmio_addr
;
5964 RTL_W8(TxPoll
, NPQ
);
5969 static inline int rtl8169_fragmented_frame(u32 status
)
5971 return (status
& (FirstFrag
| LastFrag
)) != (FirstFrag
| LastFrag
);
5974 static inline void rtl8169_rx_csum(struct sk_buff
*skb
, u32 opts1
)
5976 u32 status
= opts1
& RxProtoMask
;
5978 if (((status
== RxProtoTCP
) && !(opts1
& TCPFail
)) ||
5979 ((status
== RxProtoUDP
) && !(opts1
& UDPFail
)))
5980 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5982 skb_checksum_none_assert(skb
);
5985 static struct sk_buff
*rtl8169_try_rx_copy(void *data
,
5986 struct rtl8169_private
*tp
,
5990 struct sk_buff
*skb
;
5991 struct device
*d
= &tp
->pci_dev
->dev
;
5993 data
= rtl8169_align(data
);
5994 dma_sync_single_for_cpu(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
5996 skb
= netdev_alloc_skb_ip_align(tp
->dev
, pkt_size
);
5998 memcpy(skb
->data
, data
, pkt_size
);
5999 dma_sync_single_for_device(d
, addr
, pkt_size
, DMA_FROM_DEVICE
);
6004 static int rtl_rx(struct net_device
*dev
, struct rtl8169_private
*tp
, u32 budget
)
6006 unsigned int cur_rx
, rx_left
;
6009 cur_rx
= tp
->cur_rx
;
6010 rx_left
= NUM_RX_DESC
+ tp
->dirty_rx
- cur_rx
;
6011 rx_left
= min(rx_left
, budget
);
6013 for (; rx_left
> 0; rx_left
--, cur_rx
++) {
6014 unsigned int entry
= cur_rx
% NUM_RX_DESC
;
6015 struct RxDesc
*desc
= tp
->RxDescArray
+ entry
;
6019 status
= le32_to_cpu(desc
->opts1
) & tp
->opts1_mask
;
6021 if (status
& DescOwn
)
6023 if (unlikely(status
& RxRES
)) {
6024 netif_info(tp
, rx_err
, dev
, "Rx ERROR. status = %08x\n",
6026 dev
->stats
.rx_errors
++;
6027 if (status
& (RxRWT
| RxRUNT
))
6028 dev
->stats
.rx_length_errors
++;
6030 dev
->stats
.rx_crc_errors
++;
6031 if (status
& RxFOVF
) {
6032 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
6033 dev
->stats
.rx_fifo_errors
++;
6035 if ((status
& (RxRUNT
| RxCRC
)) &&
6036 !(status
& (RxRWT
| RxFOVF
)) &&
6037 (dev
->features
& NETIF_F_RXALL
))
6040 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6042 struct sk_buff
*skb
;
6047 addr
= le64_to_cpu(desc
->addr
);
6048 if (likely(!(dev
->features
& NETIF_F_RXFCS
)))
6049 pkt_size
= (status
& 0x00003fff) - 4;
6051 pkt_size
= status
& 0x00003fff;
6054 * The driver does not support incoming fragmented
6055 * frames. They are seen as a symptom of over-mtu
6058 if (unlikely(rtl8169_fragmented_frame(status
))) {
6059 dev
->stats
.rx_dropped
++;
6060 dev
->stats
.rx_length_errors
++;
6061 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6065 skb
= rtl8169_try_rx_copy(tp
->Rx_databuff
[entry
],
6066 tp
, pkt_size
, addr
);
6067 rtl8169_mark_to_asic(desc
, rx_buf_sz
);
6069 dev
->stats
.rx_dropped
++;
6073 rtl8169_rx_csum(skb
, status
);
6074 skb_put(skb
, pkt_size
);
6075 skb
->protocol
= eth_type_trans(skb
, dev
);
6077 rtl8169_rx_vlan_tag(desc
, skb
);
6079 napi_gro_receive(&tp
->napi
, skb
);
6081 u64_stats_update_begin(&tp
->rx_stats
.syncp
);
6082 tp
->rx_stats
.packets
++;
6083 tp
->rx_stats
.bytes
+= pkt_size
;
6084 u64_stats_update_end(&tp
->rx_stats
.syncp
);
6087 /* Work around for AMD plateform. */
6088 if ((desc
->opts2
& cpu_to_le32(0xfffe000)) &&
6089 (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)) {
6095 count
= cur_rx
- tp
->cur_rx
;
6096 tp
->cur_rx
= cur_rx
;
6098 tp
->dirty_rx
+= count
;
6103 static irqreturn_t
rtl8169_interrupt(int irq
, void *dev_instance
)
6105 struct net_device
*dev
= dev_instance
;
6106 struct rtl8169_private
*tp
= netdev_priv(dev
);
6110 status
= rtl_get_events(tp
);
6111 if (status
&& status
!= 0xffff) {
6112 status
&= RTL_EVENT_NAPI
| tp
->event_slow
;
6116 rtl_irq_disable(tp
);
6117 napi_schedule(&tp
->napi
);
6120 return IRQ_RETVAL(handled
);
6124 * Workqueue context.
6126 static void rtl_slow_event_work(struct rtl8169_private
*tp
)
6128 struct net_device
*dev
= tp
->dev
;
6131 status
= rtl_get_events(tp
) & tp
->event_slow
;
6132 rtl_ack_events(tp
, status
);
6134 if (unlikely(status
& RxFIFOOver
)) {
6135 switch (tp
->mac_version
) {
6136 /* Work around for rx fifo overflow */
6137 case RTL_GIGA_MAC_VER_11
:
6138 netif_stop_queue(dev
);
6139 /* XXX - Hack alert. See rtl_task(). */
6140 set_bit(RTL_FLAG_TASK_RESET_PENDING
, tp
->wk
.flags
);
6146 if (unlikely(status
& SYSErr
))
6147 rtl8169_pcierr_interrupt(dev
);
6149 if (status
& LinkChg
)
6150 __rtl8169_check_link_status(dev
, tp
, tp
->mmio_addr
, true);
6152 rtl_irq_enable_all(tp
);
6155 static void rtl_task(struct work_struct
*work
)
6157 static const struct {
6159 void (*action
)(struct rtl8169_private
*);
6161 /* XXX - keep rtl_slow_event_work() as first element. */
6162 { RTL_FLAG_TASK_SLOW_PENDING
, rtl_slow_event_work
},
6163 { RTL_FLAG_TASK_RESET_PENDING
, rtl_reset_work
},
6164 { RTL_FLAG_TASK_PHY_PENDING
, rtl_phy_work
}
6166 struct rtl8169_private
*tp
=
6167 container_of(work
, struct rtl8169_private
, wk
.work
);
6168 struct net_device
*dev
= tp
->dev
;
6173 if (!netif_running(dev
) ||
6174 !test_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
))
6177 for (i
= 0; i
< ARRAY_SIZE(rtl_work
); i
++) {
6180 pending
= test_and_clear_bit(rtl_work
[i
].bitnr
, tp
->wk
.flags
);
6182 rtl_work
[i
].action(tp
);
6186 rtl_unlock_work(tp
);
6189 static int rtl8169_poll(struct napi_struct
*napi
, int budget
)
6191 struct rtl8169_private
*tp
= container_of(napi
, struct rtl8169_private
, napi
);
6192 struct net_device
*dev
= tp
->dev
;
6193 u16 enable_mask
= RTL_EVENT_NAPI
| tp
->event_slow
;
6197 status
= rtl_get_events(tp
);
6198 rtl_ack_events(tp
, status
& ~tp
->event_slow
);
6200 if (status
& RTL_EVENT_NAPI_RX
)
6201 work_done
= rtl_rx(dev
, tp
, (u32
) budget
);
6203 if (status
& RTL_EVENT_NAPI_TX
)
6206 if (status
& tp
->event_slow
) {
6207 enable_mask
&= ~tp
->event_slow
;
6209 rtl_schedule_task(tp
, RTL_FLAG_TASK_SLOW_PENDING
);
6212 if (work_done
< budget
) {
6213 napi_complete(napi
);
6215 rtl_irq_enable(tp
, enable_mask
);
6222 static void rtl8169_rx_missed(struct net_device
*dev
, void __iomem
*ioaddr
)
6224 struct rtl8169_private
*tp
= netdev_priv(dev
);
6226 if (tp
->mac_version
> RTL_GIGA_MAC_VER_06
)
6229 dev
->stats
.rx_missed_errors
+= (RTL_R32(RxMissed
) & 0xffffff);
6230 RTL_W32(RxMissed
, 0);
6233 static void rtl8169_down(struct net_device
*dev
)
6235 struct rtl8169_private
*tp
= netdev_priv(dev
);
6236 void __iomem
*ioaddr
= tp
->mmio_addr
;
6238 del_timer_sync(&tp
->timer
);
6240 napi_disable(&tp
->napi
);
6241 netif_stop_queue(dev
);
6243 rtl8169_hw_reset(tp
);
6245 * At this point device interrupts can not be enabled in any function,
6246 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6247 * and napi is disabled (rtl8169_poll).
6249 rtl8169_rx_missed(dev
, ioaddr
);
6251 /* Give a racing hard_start_xmit a few cycles to complete. */
6252 synchronize_sched();
6254 rtl8169_tx_clear(tp
);
6256 rtl8169_rx_clear(tp
);
6258 rtl_pll_power_down(tp
);
6261 static int rtl8169_close(struct net_device
*dev
)
6263 struct rtl8169_private
*tp
= netdev_priv(dev
);
6264 struct pci_dev
*pdev
= tp
->pci_dev
;
6266 pm_runtime_get_sync(&pdev
->dev
);
6268 /* Update counters before going down */
6269 rtl8169_update_counters(dev
);
6272 clear_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6275 rtl_unlock_work(tp
);
6277 free_irq(pdev
->irq
, dev
);
6279 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6281 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6283 tp
->TxDescArray
= NULL
;
6284 tp
->RxDescArray
= NULL
;
6286 pm_runtime_put_sync(&pdev
->dev
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: drive the interrupt handler with interrupts disabled. */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
6300 static int rtl_open(struct net_device
*dev
)
6302 struct rtl8169_private
*tp
= netdev_priv(dev
);
6303 void __iomem
*ioaddr
= tp
->mmio_addr
;
6304 struct pci_dev
*pdev
= tp
->pci_dev
;
6305 int retval
= -ENOMEM
;
6307 pm_runtime_get_sync(&pdev
->dev
);
6310 * Rx and Tx descriptors needs 256 bytes alignment.
6311 * dma_alloc_coherent provides more.
6313 tp
->TxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
,
6314 &tp
->TxPhyAddr
, GFP_KERNEL
);
6315 if (!tp
->TxDescArray
)
6316 goto err_pm_runtime_put
;
6318 tp
->RxDescArray
= dma_alloc_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
,
6319 &tp
->RxPhyAddr
, GFP_KERNEL
);
6320 if (!tp
->RxDescArray
)
6323 retval
= rtl8169_init_ring(dev
);
6327 INIT_WORK(&tp
->wk
.work
, rtl_task
);
6331 rtl_request_firmware(tp
);
6333 retval
= request_irq(pdev
->irq
, rtl8169_interrupt
,
6334 (tp
->features
& RTL_FEATURE_MSI
) ? 0 : IRQF_SHARED
,
6337 goto err_release_fw_2
;
6341 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6343 napi_enable(&tp
->napi
);
6345 rtl8169_init_phy(dev
, tp
);
6347 __rtl8169_set_features(dev
, dev
->features
);
6349 rtl_pll_power_up(tp
);
6353 netif_start_queue(dev
);
6355 rtl_unlock_work(tp
);
6357 tp
->saved_wolopts
= 0;
6358 pm_runtime_put_noidle(&pdev
->dev
);
6360 rtl8169_check_link_status(dev
, tp
, ioaddr
);
6365 rtl_release_firmware(tp
);
6366 rtl8169_rx_clear(tp
);
6368 dma_free_coherent(&pdev
->dev
, R8169_RX_RING_BYTES
, tp
->RxDescArray
,
6370 tp
->RxDescArray
= NULL
;
6372 dma_free_coherent(&pdev
->dev
, R8169_TX_RING_BYTES
, tp
->TxDescArray
,
6374 tp
->TxDescArray
= NULL
;
6376 pm_runtime_put_noidle(&pdev
->dev
);
6380 static struct rtnl_link_stats64
*
6381 rtl8169_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
6383 struct rtl8169_private
*tp
= netdev_priv(dev
);
6384 void __iomem
*ioaddr
= tp
->mmio_addr
;
6387 if (netif_running(dev
))
6388 rtl8169_rx_missed(dev
, ioaddr
);
6391 start
= u64_stats_fetch_begin_bh(&tp
->rx_stats
.syncp
);
6392 stats
->rx_packets
= tp
->rx_stats
.packets
;
6393 stats
->rx_bytes
= tp
->rx_stats
.bytes
;
6394 } while (u64_stats_fetch_retry_bh(&tp
->rx_stats
.syncp
, start
));
6398 start
= u64_stats_fetch_begin_bh(&tp
->tx_stats
.syncp
);
6399 stats
->tx_packets
= tp
->tx_stats
.packets
;
6400 stats
->tx_bytes
= tp
->tx_stats
.bytes
;
6401 } while (u64_stats_fetch_retry_bh(&tp
->tx_stats
.syncp
, start
));
6403 stats
->rx_dropped
= dev
->stats
.rx_dropped
;
6404 stats
->tx_dropped
= dev
->stats
.tx_dropped
;
6405 stats
->rx_length_errors
= dev
->stats
.rx_length_errors
;
6406 stats
->rx_errors
= dev
->stats
.rx_errors
;
6407 stats
->rx_crc_errors
= dev
->stats
.rx_crc_errors
;
6408 stats
->rx_fifo_errors
= dev
->stats
.rx_fifo_errors
;
6409 stats
->rx_missed_errors
= dev
->stats
.rx_missed_errors
;
6414 static void rtl8169_net_suspend(struct net_device
*dev
)
6416 struct rtl8169_private
*tp
= netdev_priv(dev
);
6418 if (!netif_running(dev
))
6421 netif_device_detach(dev
);
6422 netif_stop_queue(dev
);
6425 napi_disable(&tp
->napi
);
6426 clear_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6427 rtl_unlock_work(tp
);
6429 rtl_pll_power_down(tp
);
/* System sleep entry point. */
static int rtl8169_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	rtl8169_net_suspend(dev);

	return 0;
}
6444 static void __rtl8169_resume(struct net_device
*dev
)
6446 struct rtl8169_private
*tp
= netdev_priv(dev
);
6448 netif_device_attach(dev
);
6450 rtl_pll_power_up(tp
);
6453 napi_enable(&tp
->napi
);
6454 set_bit(RTL_FLAG_TASK_ENABLED
, tp
->wk
.flags
);
6455 rtl_unlock_work(tp
);
6457 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
/* System resume entry point. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6474 static int rtl8169_runtime_suspend(struct device
*device
)
6476 struct pci_dev
*pdev
= to_pci_dev(device
);
6477 struct net_device
*dev
= pci_get_drvdata(pdev
);
6478 struct rtl8169_private
*tp
= netdev_priv(dev
);
6480 if (!tp
->TxDescArray
)
6484 tp
->saved_wolopts
= __rtl8169_get_wol(tp
);
6485 __rtl8169_set_wol(tp
, WAKE_ANY
);
6486 rtl_unlock_work(tp
);
6488 rtl8169_net_suspend(dev
);
6493 static int rtl8169_runtime_resume(struct device
*device
)
6495 struct pci_dev
*pdev
= to_pci_dev(device
);
6496 struct net_device
*dev
= pci_get_drvdata(pdev
);
6497 struct rtl8169_private
*tp
= netdev_priv(dev
);
6499 if (!tp
->TxDescArray
)
6503 __rtl8169_set_wol(tp
, tp
->saved_wolopts
);
6504 tp
->saved_wolopts
= 0;
6505 rtl_unlock_work(tp
);
6507 rtl8169_init_phy(dev
, tp
);
6509 __rtl8169_resume(dev
);
6514 static int rtl8169_runtime_idle(struct device
*device
)
6516 struct pci_dev
*pdev
= to_pci_dev(device
);
6517 struct net_device
*dev
= pci_get_drvdata(pdev
);
6518 struct rtl8169_private
*tp
= netdev_priv(dev
);
6520 return tp
->TxDescArray
? -EBUSY
: 0;
6523 static const struct dev_pm_ops rtl8169_pm_ops
= {
6524 .suspend
= rtl8169_suspend
,
6525 .resume
= rtl8169_resume
,
6526 .freeze
= rtl8169_suspend
,
6527 .thaw
= rtl8169_resume
,
6528 .poweroff
= rtl8169_suspend
,
6529 .restore
= rtl8169_resume
,
6530 .runtime_suspend
= rtl8169_runtime_suspend
,
6531 .runtime_resume
= rtl8169_runtime_resume
,
6532 .runtime_idle
= rtl8169_runtime_idle
,
6535 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6537 #else /* !CONFIG_PM */
6539 #define RTL8169_PM_OPS NULL
6541 #endif /* !CONFIG_PM */
6543 static void rtl_wol_shutdown_quirk(struct rtl8169_private
*tp
)
6545 void __iomem
*ioaddr
= tp
->mmio_addr
;
6547 /* WoL fails with 8168b when the receiver is disabled. */
6548 switch (tp
->mac_version
) {
6549 case RTL_GIGA_MAC_VER_11
:
6550 case RTL_GIGA_MAC_VER_12
:
6551 case RTL_GIGA_MAC_VER_17
:
6552 pci_clear_master(tp
->pci_dev
);
6554 RTL_W8(ChipCmd
, CmdRxEnb
);
6563 static void rtl_shutdown(struct pci_dev
*pdev
)
6565 struct net_device
*dev
= pci_get_drvdata(pdev
);
6566 struct rtl8169_private
*tp
= netdev_priv(dev
);
6567 struct device
*d
= &pdev
->dev
;
6569 pm_runtime_get_sync(d
);
6571 rtl8169_net_suspend(dev
);
6573 /* Restore original MAC address */
6574 rtl_rar_set(tp
, dev
->perm_addr
);
6576 rtl8169_hw_reset(tp
);
6578 if (system_state
== SYSTEM_POWER_OFF
) {
6579 if (__rtl8169_get_wol(tp
) & WAKE_ANY
) {
6580 rtl_wol_suspend_quirk(tp
);
6581 rtl_wol_shutdown_quirk(tp
);
6584 pci_wake_from_d3(pdev
, true);
6585 pci_set_power_state(pdev
, PCI_D3hot
);
6588 pm_runtime_put_noidle(d
);
6591 static void __devexit
rtl_remove_one(struct pci_dev
*pdev
)
6593 struct net_device
*dev
= pci_get_drvdata(pdev
);
6594 struct rtl8169_private
*tp
= netdev_priv(dev
);
6596 if (tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
6597 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
6598 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) {
6599 rtl8168_driver_stop(tp
);
6602 cancel_work_sync(&tp
->wk
.work
);
6604 netif_napi_del(&tp
->napi
);
6606 unregister_netdev(dev
);
6608 rtl_release_firmware(tp
);
6610 if (pci_dev_run_wake(pdev
))
6611 pm_runtime_get_noresume(&pdev
->dev
);
6613 /* restore original MAC address */
6614 rtl_rar_set(tp
, dev
->perm_addr
);
6616 rtl_disable_msi(pdev
, tp
);
6617 rtl8169_release_board(pdev
, dev
, tp
->mmio_addr
);
6618 pci_set_drvdata(pdev
, NULL
);
6621 static const struct net_device_ops rtl_netdev_ops
= {
6622 .ndo_open
= rtl_open
,
6623 .ndo_stop
= rtl8169_close
,
6624 .ndo_get_stats64
= rtl8169_get_stats64
,
6625 .ndo_start_xmit
= rtl8169_start_xmit
,
6626 .ndo_tx_timeout
= rtl8169_tx_timeout
,
6627 .ndo_validate_addr
= eth_validate_addr
,
6628 .ndo_change_mtu
= rtl8169_change_mtu
,
6629 .ndo_fix_features
= rtl8169_fix_features
,
6630 .ndo_set_features
= rtl8169_set_features
,
6631 .ndo_set_mac_address
= rtl_set_mac_address
,
6632 .ndo_do_ioctl
= rtl8169_ioctl
,
6633 .ndo_set_rx_mode
= rtl_set_rx_mode
,
6634 #ifdef CONFIG_NET_POLL_CONTROLLER
6635 .ndo_poll_controller
= rtl8169_netpoll
,
6640 static const struct rtl_cfg_info
{
6641 void (*hw_start
)(struct net_device
*);
6642 unsigned int region
;
6647 } rtl_cfg_infos
[] = {
6649 .hw_start
= rtl_hw_start_8169
,
6652 .event_slow
= SYSErr
| LinkChg
| RxOverflow
| RxFIFOOver
,
6653 .features
= RTL_FEATURE_GMII
,
6654 .default_ver
= RTL_GIGA_MAC_VER_01
,
6657 .hw_start
= rtl_hw_start_8168
,
6660 .event_slow
= SYSErr
| LinkChg
| RxOverflow
,
6661 .features
= RTL_FEATURE_GMII
| RTL_FEATURE_MSI
,
6662 .default_ver
= RTL_GIGA_MAC_VER_11
,
6665 .hw_start
= rtl_hw_start_8101
,
6668 .event_slow
= SYSErr
| LinkChg
| RxOverflow
| RxFIFOOver
|
6670 .features
= RTL_FEATURE_MSI
,
6671 .default_ver
= RTL_GIGA_MAC_VER_13
,
6675 /* Cfg9346_Unlock assumed. */
6676 static unsigned rtl_try_msi(struct rtl8169_private
*tp
,
6677 const struct rtl_cfg_info
*cfg
)
6679 void __iomem
*ioaddr
= tp
->mmio_addr
;
6683 cfg2
= RTL_R8(Config2
) & ~MSIEnable
;
6684 if (cfg
->features
& RTL_FEATURE_MSI
) {
6685 if (pci_enable_msi(tp
->pci_dev
)) {
6686 netif_info(tp
, hw
, tp
->dev
, "no MSI. Back to INTx.\n");
6689 msi
= RTL_FEATURE_MSI
;
6692 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
)
6693 RTL_W8(Config2
, cfg2
);
6697 DECLARE_RTL_COND(rtl_link_list_ready_cond
)
6699 void __iomem
*ioaddr
= tp
->mmio_addr
;
6701 return RTL_R8(MCU
) & LINK_LIST_RDY
;
6704 DECLARE_RTL_COND(rtl_rxtx_empty_cond
)
6706 void __iomem
*ioaddr
= tp
->mmio_addr
;
6708 return (RTL_R8(MCU
) & RXTX_EMPTY
) == RXTX_EMPTY
;
6711 static void __devinit
rtl_hw_init_8168g(struct rtl8169_private
*tp
)
6713 void __iomem
*ioaddr
= tp
->mmio_addr
;
6716 tp
->ocp_base
= OCP_STD_PHY_BASE
;
6718 RTL_W32(MISC
, RTL_R32(MISC
) | RXDV_GATED_EN
);
6720 if (!rtl_udelay_loop_wait_high(tp
, &rtl_txcfg_empty_cond
, 100, 42))
6723 if (!rtl_udelay_loop_wait_high(tp
, &rtl_rxtx_empty_cond
, 100, 42))
6726 RTL_W8(ChipCmd
, RTL_R8(ChipCmd
) & ~(CmdTxEnb
| CmdRxEnb
));
6728 RTL_W8(MCU
, RTL_R8(MCU
) & ~NOW_IS_OOB
);
6730 data
= r8168_mac_ocp_read(tp
, 0xe8de);
6732 r8168_mac_ocp_write(tp
, 0xe8de, data
);
6734 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
6737 data
= r8168_mac_ocp_read(tp
, 0xe8de);
6739 r8168_mac_ocp_write(tp
, 0xe8de, data
);
6741 if (!rtl_udelay_loop_wait_high(tp
, &rtl_link_list_ready_cond
, 100, 42))
6745 static void __devinit
rtl_hw_initialize(struct rtl8169_private
*tp
)
6747 switch (tp
->mac_version
) {
6748 case RTL_GIGA_MAC_VER_40
:
6749 case RTL_GIGA_MAC_VER_41
:
6750 rtl_hw_init_8168g(tp
);
6758 static int __devinit
6759 rtl_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
6761 const struct rtl_cfg_info
*cfg
= rtl_cfg_infos
+ ent
->driver_data
;
6762 const unsigned int region
= cfg
->region
;
6763 struct rtl8169_private
*tp
;
6764 struct mii_if_info
*mii
;
6765 struct net_device
*dev
;
6766 void __iomem
*ioaddr
;
6770 if (netif_msg_drv(&debug
)) {
6771 printk(KERN_INFO
"%s Gigabit Ethernet driver %s loaded\n",
6772 MODULENAME
, RTL8169_VERSION
);
6775 dev
= alloc_etherdev(sizeof (*tp
));
6781 SET_NETDEV_DEV(dev
, &pdev
->dev
);
6782 dev
->netdev_ops
= &rtl_netdev_ops
;
6783 tp
= netdev_priv(dev
);
6786 tp
->msg_enable
= netif_msg_init(debug
.msg_enable
, R8169_MSG_DEFAULT
);
6790 mii
->mdio_read
= rtl_mdio_read
;
6791 mii
->mdio_write
= rtl_mdio_write
;
6792 mii
->phy_id_mask
= 0x1f;
6793 mii
->reg_num_mask
= 0x1f;
6794 mii
->supports_gmii
= !!(cfg
->features
& RTL_FEATURE_GMII
);
6796 /* disable ASPM completely as that cause random device stop working
6797 * problems as well as full system hangs for some PCIe devices users */
6798 pci_disable_link_state(pdev
, PCIE_LINK_STATE_L0S
| PCIE_LINK_STATE_L1
|
6799 PCIE_LINK_STATE_CLKPM
);
6801 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6802 rc
= pci_enable_device(pdev
);
6804 netif_err(tp
, probe
, dev
, "enable failure\n");
6805 goto err_out_free_dev_1
;
6808 if (pci_set_mwi(pdev
) < 0)
6809 netif_info(tp
, probe
, dev
, "Mem-Wr-Inval unavailable\n");
6811 /* make sure PCI base addr 1 is MMIO */
6812 if (!(pci_resource_flags(pdev
, region
) & IORESOURCE_MEM
)) {
6813 netif_err(tp
, probe
, dev
,
6814 "region #%d not an MMIO resource, aborting\n",
6820 /* check for weird/broken PCI region reporting */
6821 if (pci_resource_len(pdev
, region
) < R8169_REGS_SIZE
) {
6822 netif_err(tp
, probe
, dev
,
6823 "Invalid PCI region size(s), aborting\n");
6828 rc
= pci_request_regions(pdev
, MODULENAME
);
6830 netif_err(tp
, probe
, dev
, "could not request regions\n");
6834 tp
->cp_cmd
= RxChkSum
;
6836 if ((sizeof(dma_addr_t
) > 4) &&
6837 !pci_set_dma_mask(pdev
, DMA_BIT_MASK(64)) && use_dac
) {
6838 tp
->cp_cmd
|= PCIDAC
;
6839 dev
->features
|= NETIF_F_HIGHDMA
;
6841 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
6843 netif_err(tp
, probe
, dev
, "DMA configuration failed\n");
6844 goto err_out_free_res_3
;
6848 /* ioremap MMIO region */
6849 ioaddr
= ioremap(pci_resource_start(pdev
, region
), R8169_REGS_SIZE
);
6851 netif_err(tp
, probe
, dev
, "cannot remap MMIO, aborting\n");
6853 goto err_out_free_res_3
;
6855 tp
->mmio_addr
= ioaddr
;
6857 if (!pci_is_pcie(pdev
))
6858 netif_info(tp
, probe
, dev
, "not PCI Express\n");
6860 /* Identify chip attached to board */
6861 rtl8169_get_mac_version(tp
, dev
, cfg
->default_ver
);
6865 rtl_irq_disable(tp
);
6867 rtl_hw_initialize(tp
);
6871 rtl_ack_events(tp
, 0xffff);
6873 pci_set_master(pdev
);
6876 * Pretend we are using VLANs; This bypasses a nasty bug where
6877 * Interrupts stop flowing on high load on 8110SCd controllers.
6879 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
6880 tp
->cp_cmd
|= RxVlan
;
6882 rtl_init_mdio_ops(tp
);
6883 rtl_init_pll_power_ops(tp
);
6884 rtl_init_jumbo_ops(tp
);
6885 rtl_init_csi_ops(tp
);
6887 rtl8169_print_mac_version(tp
);
6889 chipset
= tp
->mac_version
;
6890 tp
->txd_version
= rtl_chip_infos
[chipset
].txd_version
;
6892 RTL_W8(Cfg9346
, Cfg9346_Unlock
);
6893 RTL_W8(Config1
, RTL_R8(Config1
) | PMEnable
);
6894 RTL_W8(Config5
, RTL_R8(Config5
) & PMEStatus
);
6895 if ((RTL_R8(Config3
) & (LinkUp
| MagicPacket
)) != 0)
6896 tp
->features
|= RTL_FEATURE_WOL
;
6897 if ((RTL_R8(Config5
) & (UWF
| BWF
| MWF
)) != 0)
6898 tp
->features
|= RTL_FEATURE_WOL
;
6899 tp
->features
|= rtl_try_msi(tp
, cfg
);
6900 RTL_W8(Cfg9346
, Cfg9346_Lock
);
6902 if (rtl_tbi_enabled(tp
)) {
6903 tp
->set_speed
= rtl8169_set_speed_tbi
;
6904 tp
->get_settings
= rtl8169_gset_tbi
;
6905 tp
->phy_reset_enable
= rtl8169_tbi_reset_enable
;
6906 tp
->phy_reset_pending
= rtl8169_tbi_reset_pending
;
6907 tp
->link_ok
= rtl8169_tbi_link_ok
;
6908 tp
->do_ioctl
= rtl_tbi_ioctl
;
6910 tp
->set_speed
= rtl8169_set_speed_xmii
;
6911 tp
->get_settings
= rtl8169_gset_xmii
;
6912 tp
->phy_reset_enable
= rtl8169_xmii_reset_enable
;
6913 tp
->phy_reset_pending
= rtl8169_xmii_reset_pending
;
6914 tp
->link_ok
= rtl8169_xmii_link_ok
;
6915 tp
->do_ioctl
= rtl_xmii_ioctl
;
6918 mutex_init(&tp
->wk
.mutex
);
6920 /* Get MAC address */
6921 for (i
= 0; i
< ETH_ALEN
; i
++)
6922 dev
->dev_addr
[i
] = RTL_R8(MAC0
+ i
);
6923 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
6925 SET_ETHTOOL_OPS(dev
, &rtl8169_ethtool_ops
);
6926 dev
->watchdog_timeo
= RTL8169_TX_TIMEOUT
;
6928 netif_napi_add(dev
, &tp
->napi
, rtl8169_poll
, R8169_NAPI_WEIGHT
);
6930 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6931 * properly for all devices */
6932 dev
->features
|= NETIF_F_RXCSUM
|
6933 NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6935 dev
->hw_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
6936 NETIF_F_RXCSUM
| NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6937 dev
->vlan_features
= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
|
6940 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
6941 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6942 dev
->hw_features
&= ~NETIF_F_HW_VLAN_RX
;
6944 dev
->hw_features
|= NETIF_F_RXALL
;
6945 dev
->hw_features
|= NETIF_F_RXFCS
;
6947 tp
->hw_start
= cfg
->hw_start
;
6948 tp
->event_slow
= cfg
->event_slow
;
6950 tp
->opts1_mask
= (tp
->mac_version
!= RTL_GIGA_MAC_VER_01
) ?
6951 ~(RxBOVF
| RxFOVF
) : ~0;
6953 init_timer(&tp
->timer
);
6954 tp
->timer
.data
= (unsigned long) dev
;
6955 tp
->timer
.function
= rtl8169_phy_timer
;
6957 tp
->rtl_fw
= RTL_FIRMWARE_UNKNOWN
;
6959 rc
= register_netdev(dev
);
6963 pci_set_drvdata(pdev
, dev
);
6965 netif_info(tp
, probe
, dev
, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6966 rtl_chip_infos
[chipset
].name
, ioaddr
, dev
->dev_addr
,
6967 (u32
)(RTL_R32(TxConfig
) & 0x9cf0f8ff), pdev
->irq
);
6968 if (rtl_chip_infos
[chipset
].jumbo_max
!= JUMBO_1K
) {
6969 netif_info(tp
, probe
, dev
, "jumbo features [frames: %d bytes, "
6970 "tx checksumming: %s]\n",
6971 rtl_chip_infos
[chipset
].jumbo_max
,
6972 rtl_chip_infos
[chipset
].jumbo_tx_csum
? "ok" : "ko");
6975 if (tp
->mac_version
== RTL_GIGA_MAC_VER_27
||
6976 tp
->mac_version
== RTL_GIGA_MAC_VER_28
||
6977 tp
->mac_version
== RTL_GIGA_MAC_VER_31
) {
6978 rtl8168_driver_start(tp
);
6981 device_set_wakeup_enable(&pdev
->dev
, tp
->features
& RTL_FEATURE_WOL
);
6983 if (pci_dev_run_wake(pdev
))
6984 pm_runtime_put_noidle(&pdev
->dev
);
6986 netif_carrier_off(dev
);
6992 netif_napi_del(&tp
->napi
);
6993 rtl_disable_msi(pdev
, tp
);
6996 pci_release_regions(pdev
);
6998 pci_clear_mwi(pdev
);
6999 pci_disable_device(pdev
);
7005 static struct pci_driver rtl8169_pci_driver
= {
7007 .id_table
= rtl8169_pci_tbl
,
7008 .probe
= rtl_init_one
,
7009 .remove
= __devexit_p(rtl_remove_one
),
7010 .shutdown
= rtl_shutdown
,
7011 .driver
.pm
= RTL8169_PM_OPS
,
7014 static int __init
rtl8169_init_module(void)
7016 return pci_register_driver(&rtl8169_pci_driver
);
7019 static void __exit
rtl8169_cleanup_module(void)
7021 pci_unregister_driver(&rtl8169_pci_driver
);
7024 module_init(rtl8169_init_module
);
7025 module_exit(rtl8169_cleanup_module
);