// SPDX-License-Identifier: GPL-2.0-only
/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>

#include "r8169_firmware.h"
#define MODULENAME "r8169"

#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
#define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8168FP_3	"rtl_nic/rtl8168fp-3.fw"
#define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3	"rtl_nic/rtl8125a-3.fw"

#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
#define MC_FILTER_LIMIT	32
#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_RX_BUF_SIZE	(SZ_16K - 1)
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL_CFG_NO_GBIT	1

/* write/read MMIO register */
#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
#define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
#define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
#define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
#define RTL_R16(tp, reg)	readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg)	readl(tp->mmio_addr + (reg))
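/*
 * Usage sketch (comment added for clarity, not part of the original driver):
 * the accessors above are typically combined for read-modify-write cycles,
 * e.g.
 *
 *	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | PMEnable);
 *
 * MMIO writes are posted, so code that must flush a write before the next
 * step reads a register back (see rtl_pci_commit() further down).
 */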
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
static const struct {
	const char *name;
	const char *fw_name;
} rtl_chip_infos[] = {
	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"},
	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"},
	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"},
	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"},
	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"},
	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"},
	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"},
	[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e"},
	[RTL_GIGA_MAC_VER_10] = {"RTL8101e"},
	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"},
	[RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b"},
	[RTL_GIGA_MAC_VER_13] = {"RTL8101e"},
	[RTL_GIGA_MAC_VER_14] = {"RTL8100e"},
	[RTL_GIGA_MAC_VER_15] = {"RTL8100e"},
	[RTL_GIGA_MAC_VER_16] = {"RTL8101e"},
	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"},
	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"},
	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"},
	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"},
	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"},
	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"},
	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"},
	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"},
	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
	[RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp"},
	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"},
	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"},
	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1},
	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1},
	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
	[RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g"},
	[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu",	FIRMWARE_8168G_3},
	[RTL_GIGA_MAC_VER_43] = {"RTL8106eus",		FIRMWARE_8106E_2},
	[RTL_GIGA_MAC_VER_44] = {"RTL8411b",		FIRMWARE_8411_2},
	[RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h",	FIRMWARE_8168H_1},
	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
	[RTL_GIGA_MAC_VER_47] = {"RTL8107e",		FIRMWARE_8107E_1},
	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
	[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep"},
	[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep"},
	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"},
	[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117",	FIRMWARE_8168FP_3},
	[RTL_GIGA_MAC_VER_60] = {"RTL8125"},
	[RTL_GIGA_MAC_VER_61] = {"RTL8125",		FIRMWARE_8125A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
	{ PCI_VDEVICE(REALTEK,	0x2502) },
	{ PCI_VDEVICE(REALTEK,	0x2600) },
	{ PCI_VDEVICE(REALTEK,	0x8129) },
	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
	{ PCI_VDEVICE(REALTEK,	0x8161) },
	{ PCI_VDEVICE(REALTEK,	0x8167) },
	{ PCI_VDEVICE(REALTEK,	0x8168) },
	{ PCI_VDEVICE(NCUBE,	0x8168) },
	{ PCI_VDEVICE(REALTEK,	0x8169) },
	{ PCI_VENDOR_ID_DLINK, 0x4300,
	  PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
	{ PCI_VDEVICE(DLINK,	0x4300) },
	{ PCI_VDEVICE(DLINK,	0x4302) },
	{ PCI_VDEVICE(AT,	0xc107) },
	{ PCI_VDEVICE(USR,	0x0116) },
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
	{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
	{ PCI_VDEVICE(REALTEK,	0x8125) },
	{ PCI_VDEVICE(REALTEK,	0x3000) },
	{}
};

MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
#define TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

#define RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define RX_EARLY_OFF			(1 << 11)
#define RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

#define RTL_COALESCE_MASK	0x0f
#define RTL_COALESCE_SHIFT	4
#define RTL_COALESCE_T_MAX	(RTL_COALESCE_MASK)
#define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_MASK << 2)

	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27
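/*
 * Worked example (comment added for clarity, not part of the original
 * driver): MaxTxPacketSize counts in 128-byte units, so TxPacketMax is
 * 8064 >> 7 == 63 units, i.e. an 8064-byte transmit size limit.
 */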
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};
enum rtl8168_8101_registers {
#define CSIAR_FLAG			0x80000000
#define CSIAR_WRITE_CMD			0x80000000
#define CSIAR_BYTE_ENABLE		0x0000f000
#define CSIAR_ADDR_MASK			0x00000fff
#define EPHYAR_FLAG			0x80000000
#define EPHYAR_WRITE_CMD		0x80000000
#define EPHYAR_REG_MASK			0x1f
#define EPHYAR_REG_SHIFT		16
#define EPHYAR_DATA_MASK		0xffff
#define PFM_EN				(1 << 6)
#define TX_10M_PS_EN			(1 << 7)
#define FIX_NAK_1			(1 << 4)
#define FIX_NAK_2			(1 << 3)
#define NOW_IS_OOB			(1 << 7)
#define TX_EMPTY			(1 << 5)
#define RX_EMPTY			(1 << 4)
#define RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
#define EN_NDP				(1 << 3)
#define EN_OOB_RESET			(1 << 2)
#define LINK_LIST_RDY			(1 << 1)
#define EFUSEAR_FLAG			0x80000000
#define EFUSEAR_WRITE_CMD		0x80000000
#define EFUSEAR_READ_CMD		0x00000000
#define EFUSEAR_REG_MASK		0x03ff
#define EFUSEAR_REG_SHIFT		8
#define EFUSEAR_DATA_MASK		0xff
#define PFM_D3COLD_EN			(1 << 6)
};
enum rtl8168_registers {
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
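/*
 * Worked example (comment added for clarity, not part of the original
 * driver): _rtl_eri_write() further down composes its command word from
 * these fields, e.g.
 *
 *	ERIAR_WRITE_CMD | ERIAR_EXGMAC | ERIAR_MASK_0001 | 0xdc
 *	  == 0x80000000 | 0x00000000   | 0x00001000      | 0xdc
 *	  == 0x800010dc
 *
 * i.e. "write the lowest byte of ERIDR to ExGMAC register 0xdc".
 */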
	EPHY_RXER_NUM	= 0x7c,
	OCPDR		= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	RDSAR1		= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC		= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define RXDV_GATED_EN			(1 << 19)
#define EARLY_TALLY_EN			(1 << 16)
};
enum rtl8125_registers {
	IntrMask_8125	= 0x38,
	IntrStatus_8125	= 0x3c,
};

#define RX_VLAN_INNER_8125	BIT(22)
#define RX_VLAN_OUTER_8125	BIT(23)
#define RX_VLAN_8125		(RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)

#define RX_FETCH_DFLT_8125	(8 << 27)
enum rtl_register_content {
	/* InterruptStatusBits */
	TxDescUnavail	= 0x0080,

	/* TXPoll register p.5 */
	HPQ		= 0x80,	/* Poll cmd on the high prio queue */
	NPQ		= 0x40,	/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,	/* Forced software interrupt */

	Cfg9346_Unlock	= 0xc0,

	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	Speed_down	= (1 << 4),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
	ASPM_en		= (1 << 0),	/* ASPM enable */

	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	EnAnaPLL	= (1 << 14),	// 8169
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
#define INTT_MASK	GENMASK(1, 0)
#define CPCMD_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)

	/* rtl8169_PHYstatus */

	/* ResetCounterCommand */

	/* DumpCounterCommand */

	/* magic enable v2 */
	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
};

enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};

enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};

/* 8169, 8168b and 810x except 8102e. */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};

/* 8102e, 8168c and beyond. */
enum rtl_tx_desc_bit_1 {
	/* First doubleword. */
	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
#define GTTCPHO_SHIFT			18
#define GTTCPHO_MAX			0x7f

	/* Second doubleword. */
#define TCPHO_SHIFT			18
#define TCPHO_MAX			0x3ff
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};

enum rtl_rx_desc_bit {
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 0/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

#define RsvdMask	0x3fffc000

#define RTL_GSO_MAX_SIZE_V1	32000
#define RTL_GSO_MAX_SEGS_V1	24
#define RTL_GSO_MAX_SIZE_V2	64000
#define RTL_GSO_MAX_SEGS_V2	64
struct rtl8169_counters {
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
};

struct rtl8169_tc_offsets {
	__le32	tx_multi_collision;
};

enum rtl_flag {
	RTL_FLAG_TASK_ENABLED = 0,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_MAX
};

struct rtl8169_stats {
	struct u64_stats_sync	syncp;
};

struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct phy_device *phydev;
	struct napi_struct napi;
	enum mac_version mac_version;
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;
	dma_addr_t RxPhyAddr;
	struct page *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */

	DECLARE_BITMAP(flags, RTL_FLAG_MAX);
	struct work_struct work;

	unsigned irq_enabled:1;
	unsigned supports_gmii:1;
	unsigned aspm_manageable:1;
	dma_addr_t counters_phys_addr;
	struct rtl8169_counters *counters;
	struct rtl8169_tc_offsets tc_offset;

	struct rtl_fw *rtl_fw;
};

typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
MODULE_FIRMWARE(FIRMWARE_8411_2);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
	return &tp->pci_dev->dev;
}

static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}

static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}

static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}

static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}

static void rtl_pci_commit(struct rtl8169_private *tp)
{
	/* Read an arbitrary register to commit a preceding PCI write */
	RTL_R8(tp, ChipCmd);
}

static bool rtl_is_8125(struct rtl8169_private *tp)
{
	return tp->mac_version >= RTL_GIGA_MAC_VER_60;
}

static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
	       tp->mac_version != RTL_GIGA_MAC_VER_39 &&
	       tp->mac_version <= RTL_GIGA_MAC_VER_52;
}

static bool rtl_supports_eee(struct rtl8169_private *tp)
{
	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
	       tp->mac_version != RTL_GIGA_MAC_VER_37 &&
	       tp->mac_version != RTL_GIGA_MAC_VER_39;
}

static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac[i] = RTL_R8(tp, reg + i);
}

struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};

static void rtl_udelay(unsigned int d)
{
	udelay(d);
}

static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
			  void (*delay)(unsigned int), unsigned int d, int n,
			  bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		if (c->check(tp) == high)
			return true;
		delay(d);
	}

	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
		  c->msg, !high, n, d);
	return false;
}

static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}

static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}

static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}

static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}

#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name,				\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
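/*
 * Illustrative expansion (comment added for clarity, not part of the
 * original driver): DECLARE_RTL_COND(rtl_ocp_gphy_cond) forward-declares
 * rtl_ocp_gphy_cond_check(), defines
 *
 *	static const struct rtl_cond rtl_ocp_gphy_cond = {
 *		.check	= rtl_ocp_gphy_cond_check,
 *		.msg	= "rtl_ocp_gphy_cond",
 *	};
 *
 * and then opens the definition of rtl_ocp_gphy_cond_check(), whose body is
 * supplied at the invocation site. The rtl_*_loop_wait_* helpers above poll
 * such a condition until it reaches the requested level or n tries expire.
 */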
static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
	if (reg & 0xffff0001) {
		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
		return true;
	}

	return false;
}

DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
}

static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}

static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(tp, GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}

static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
}

static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(tp, OCPDR, reg << 15);

	return RTL_R32(tp, OCPDR);
}

static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
				 u16 set)
{
	u16 data = r8168_mac_ocp_read(tp, reg);

	r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}

#define OCP_STD_PHY_BASE	0xa400
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
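/*
 * Worked example (comment added for clarity; it assumes the paged-access
 * logic reconstructed above): with the standard page selected, ocp_base is
 * OCP_STD_PHY_BASE (0xa400), so a write to PHY register 0x10 targets OCP
 * address 0xa400 + 0x10 * 2 = 0xa420, while writes to register 0x1f only
 * switch the page by updating tp->ocp_base.
 */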
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (reg == 0x1f)
		return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;

	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}

static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value << 4;
		return;
	}

	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
}

static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
891 DECLARE_RTL_COND(rtl_phyar_cond
)
893 return RTL_R32(tp
, PHYAR
) & 0x80000000;
896 static void r8169_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
898 RTL_W32(tp
, PHYAR
, 0x80000000 | (reg
& 0x1f) << 16 | (value
& 0xffff));
900 rtl_udelay_loop_wait_low(tp
, &rtl_phyar_cond
, 25, 20);
902 * According to hardware specs a 20us delay is required after write
903 * complete indication, but before sending next command.
908 static int r8169_mdio_read(struct rtl8169_private
*tp
, int reg
)
912 RTL_W32(tp
, PHYAR
, 0x0 | (reg
& 0x1f) << 16);
914 value
= rtl_udelay_loop_wait_high(tp
, &rtl_phyar_cond
, 25, 20) ?
915 RTL_R32(tp
, PHYAR
) & 0xffff : -ETIMEDOUT
;
918 * According to hardware specs a 20us delay is required after read
919 * complete indication, but before sending next command.
926 DECLARE_RTL_COND(rtl_ocpar_cond
)
928 return RTL_R32(tp
, OCPAR
) & OCPAR_FLAG
;
931 static void r8168dp_1_mdio_access(struct rtl8169_private
*tp
, int reg
, u32 data
)
933 RTL_W32(tp
, OCPDR
, data
| ((reg
& OCPDR_REG_MASK
) << OCPDR_GPHY_REG_SHIFT
));
934 RTL_W32(tp
, OCPAR
, OCPAR_GPHY_WRITE_CMD
);
935 RTL_W32(tp
, EPHY_RXER_NUM
, 0);
937 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 1000, 100);
940 static void r8168dp_1_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
942 r8168dp_1_mdio_access(tp
, reg
,
943 OCPDR_WRITE_CMD
| (value
& OCPDR_DATA_MASK
));
946 static int r8168dp_1_mdio_read(struct rtl8169_private
*tp
, int reg
)
948 r8168dp_1_mdio_access(tp
, reg
, OCPDR_READ_CMD
);
951 RTL_W32(tp
, OCPAR
, OCPAR_GPHY_READ_CMD
);
952 RTL_W32(tp
, EPHY_RXER_NUM
, 0);
954 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 1000, 100) ?
955 RTL_R32(tp
, OCPDR
) & OCPDR_DATA_MASK
: -ETIMEDOUT
;
958 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
960 static void r8168dp_2_mdio_start(struct rtl8169_private
*tp
)
962 RTL_W32(tp
, 0xd0, RTL_R32(tp
, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT
);
965 static void r8168dp_2_mdio_stop(struct rtl8169_private
*tp
)
967 RTL_W32(tp
, 0xd0, RTL_R32(tp
, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT
);
970 static void r8168dp_2_mdio_write(struct rtl8169_private
*tp
, int reg
, int value
)
972 r8168dp_2_mdio_start(tp
);
974 r8169_mdio_write(tp
, reg
, value
);
976 r8168dp_2_mdio_stop(tp
);
979 static int r8168dp_2_mdio_read(struct rtl8169_private
*tp
, int reg
)
983 /* Work around issue with chip reporting wrong PHY ID */
984 if (reg
== MII_PHYSID2
)
987 r8168dp_2_mdio_start(tp
);
989 value
= r8169_mdio_read(tp
, reg
);
991 r8168dp_2_mdio_stop(tp
);
996 static void rtl_writephy(struct rtl8169_private
*tp
, int location
, int val
)
998 switch (tp
->mac_version
) {
999 case RTL_GIGA_MAC_VER_27
:
1000 r8168dp_1_mdio_write(tp
, location
, val
);
1002 case RTL_GIGA_MAC_VER_28
:
1003 case RTL_GIGA_MAC_VER_31
:
1004 r8168dp_2_mdio_write(tp
, location
, val
);
1006 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_61
:
1007 r8168g_mdio_write(tp
, location
, val
);
1010 r8169_mdio_write(tp
, location
, val
);
1015 static int rtl_readphy(struct rtl8169_private
*tp
, int location
)
1017 switch (tp
->mac_version
) {
1018 case RTL_GIGA_MAC_VER_27
:
1019 return r8168dp_1_mdio_read(tp
, location
);
1020 case RTL_GIGA_MAC_VER_28
:
1021 case RTL_GIGA_MAC_VER_31
:
1022 return r8168dp_2_mdio_read(tp
, location
);
1023 case RTL_GIGA_MAC_VER_40
... RTL_GIGA_MAC_VER_61
:
1024 return r8168g_mdio_read(tp
, location
);
1026 return r8169_mdio_read(tp
, location
);
1030 DECLARE_RTL_COND(rtl_ephyar_cond
)
1032 return RTL_R32(tp
, EPHYAR
) & EPHYAR_FLAG
;
1035 static void rtl_ephy_write(struct rtl8169_private
*tp
, int reg_addr
, int value
)
1037 RTL_W32(tp
, EPHYAR
, EPHYAR_WRITE_CMD
| (value
& EPHYAR_DATA_MASK
) |
1038 (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1040 rtl_udelay_loop_wait_low(tp
, &rtl_ephyar_cond
, 10, 100);
1045 static u16
rtl_ephy_read(struct rtl8169_private
*tp
, int reg_addr
)
1047 RTL_W32(tp
, EPHYAR
, (reg_addr
& EPHYAR_REG_MASK
) << EPHYAR_REG_SHIFT
);
1049 return rtl_udelay_loop_wait_high(tp
, &rtl_ephyar_cond
, 10, 100) ?
1050 RTL_R32(tp
, EPHYAR
) & EPHYAR_DATA_MASK
: ~0;
1053 static void r8168fp_adjust_ocp_cmd(struct rtl8169_private
*tp
, u32
*cmd
, int type
)
1055 /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
1056 if (tp
->mac_version
== RTL_GIGA_MAC_VER_52
&& type
== ERIAR_OOB
)
1057 *cmd
|= 0x7f0 << 18;
1060 DECLARE_RTL_COND(rtl_eriar_cond
)
1062 return RTL_R32(tp
, ERIAR
) & ERIAR_FLAG
;
1065 static void _rtl_eri_write(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1068 u32 cmd
= ERIAR_WRITE_CMD
| type
| mask
| addr
;
1070 BUG_ON((addr
& 3) || (mask
== 0));
1071 RTL_W32(tp
, ERIDR
, val
);
1072 r8168fp_adjust_ocp_cmd(tp
, &cmd
, type
);
1073 RTL_W32(tp
, ERIAR
, cmd
);
1075 rtl_udelay_loop_wait_low(tp
, &rtl_eriar_cond
, 100, 100);
1078 static void rtl_eri_write(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1081 _rtl_eri_write(tp
, addr
, mask
, val
, ERIAR_EXGMAC
);
1084 static u32
_rtl_eri_read(struct rtl8169_private
*tp
, int addr
, int type
)
1086 u32 cmd
= ERIAR_READ_CMD
| type
| ERIAR_MASK_1111
| addr
;
1088 r8168fp_adjust_ocp_cmd(tp
, &cmd
, type
);
1089 RTL_W32(tp
, ERIAR
, cmd
);
1091 return rtl_udelay_loop_wait_high(tp
, &rtl_eriar_cond
, 100, 100) ?
1092 RTL_R32(tp
, ERIDR
) : ~0;
1095 static u32
rtl_eri_read(struct rtl8169_private
*tp
, int addr
)
1097 return _rtl_eri_read(tp
, addr
, ERIAR_EXGMAC
);
1100 static void rtl_w0w1_eri(struct rtl8169_private
*tp
, int addr
, u32 mask
, u32 p
,
1105 val
= rtl_eri_read(tp
, addr
);
1106 rtl_eri_write(tp
, addr
, mask
, (val
& ~m
) | p
);
1109 static void rtl_eri_set_bits(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1112 rtl_w0w1_eri(tp
, addr
, mask
, p
, 0);
1115 static void rtl_eri_clear_bits(struct rtl8169_private
*tp
, int addr
, u32 mask
,
1118 rtl_w0w1_eri(tp
, addr
, mask
, 0, m
);
1121 static u32
r8168dp_ocp_read(struct rtl8169_private
*tp
, u8 mask
, u16 reg
)
1123 RTL_W32(tp
, OCPAR
, ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
1124 return rtl_udelay_loop_wait_high(tp
, &rtl_ocpar_cond
, 100, 20) ?
1125 RTL_R32(tp
, OCPDR
) : ~0;
1128 static u32
r8168ep_ocp_read(struct rtl8169_private
*tp
, u8 mask
, u16 reg
)
1130 return _rtl_eri_read(tp
, reg
, ERIAR_OOB
);
1133 static void r8168dp_ocp_write(struct rtl8169_private
*tp
, u8 mask
, u16 reg
,
1136 RTL_W32(tp
, OCPDR
, data
);
1137 RTL_W32(tp
, OCPAR
, OCPAR_FLAG
| ((u32
)mask
& 0x0f) << 12 | (reg
& 0x0fff));
1138 rtl_udelay_loop_wait_low(tp
, &rtl_ocpar_cond
, 100, 20);
1141 static void r8168ep_ocp_write(struct rtl8169_private
*tp
, u8 mask
, u16 reg
,
1144 _rtl_eri_write(tp
, reg
, ((u32
)mask
& 0x0f) << ERIAR_MASK_SHIFT
,
1148 static void r8168dp_oob_notify(struct rtl8169_private
*tp
, u8 cmd
)
1150 rtl_eri_write(tp
, 0xe8, ERIAR_MASK_0001
, cmd
);
1152 r8168dp_ocp_write(tp
, 0x1, 0x30, 0x00000001);
1155 #define OOB_CMD_RESET 0x00
1156 #define OOB_CMD_DRIVER_START 0x05
1157 #define OOB_CMD_DRIVER_STOP 0x06
1159 static u16
rtl8168_get_ocp_reg(struct rtl8169_private
*tp
)
1161 return (tp
->mac_version
== RTL_GIGA_MAC_VER_31
) ? 0xb8 : 0x10;
1164 DECLARE_RTL_COND(rtl_dp_ocp_read_cond
)
1168 reg
= rtl8168_get_ocp_reg(tp
);
1170 return r8168dp_ocp_read(tp
, 0x0f, reg
) & 0x00000800;
1173 DECLARE_RTL_COND(rtl_ep_ocp_read_cond
)
1175 return r8168ep_ocp_read(tp
, 0x0f, 0x124) & 0x00000001;
1178 DECLARE_RTL_COND(rtl_ocp_tx_cond
)
1180 return RTL_R8(tp
, IBISR0
) & 0x20;
1183 static void rtl8168ep_stop_cmac(struct rtl8169_private
*tp
)
1185 RTL_W8(tp
, IBCR2
, RTL_R8(tp
, IBCR2
) & ~0x01);
1186 rtl_msleep_loop_wait_high(tp
, &rtl_ocp_tx_cond
, 50, 2000);
1187 RTL_W8(tp
, IBISR0
, RTL_R8(tp
, IBISR0
) | 0x20);
1188 RTL_W8(tp
, IBCR0
, RTL_R8(tp
, IBCR0
) & ~0x01);
1191 static void rtl8168dp_driver_start(struct rtl8169_private
*tp
)
1193 r8168dp_oob_notify(tp
, OOB_CMD_DRIVER_START
);
1194 rtl_msleep_loop_wait_high(tp
, &rtl_dp_ocp_read_cond
, 10, 10);
1197 static void rtl8168ep_driver_start(struct rtl8169_private
*tp
)
1199 r8168ep_ocp_write(tp
, 0x01, 0x180, OOB_CMD_DRIVER_START
);
1200 r8168ep_ocp_write(tp
, 0x01, 0x30,
1201 r8168ep_ocp_read(tp
, 0x01, 0x30) | 0x01);
1202 rtl_msleep_loop_wait_high(tp
, &rtl_ep_ocp_read_cond
, 10, 10);
1205 static void rtl8168_driver_start(struct rtl8169_private
*tp
)
1207 switch (tp
->mac_version
) {
1208 case RTL_GIGA_MAC_VER_27
:
1209 case RTL_GIGA_MAC_VER_28
:
1210 case RTL_GIGA_MAC_VER_31
:
1211 rtl8168dp_driver_start(tp
);
1213 case RTL_GIGA_MAC_VER_49
... RTL_GIGA_MAC_VER_52
:
1214 rtl8168ep_driver_start(tp
);
1222 static void rtl8168dp_driver_stop(struct rtl8169_private
*tp
)
1224 r8168dp_oob_notify(tp
, OOB_CMD_DRIVER_STOP
);
1225 rtl_msleep_loop_wait_low(tp
, &rtl_dp_ocp_read_cond
, 10, 10);
1228 static void rtl8168ep_driver_stop(struct rtl8169_private
*tp
)
1230 rtl8168ep_stop_cmac(tp
);
1231 r8168ep_ocp_write(tp
, 0x01, 0x180, OOB_CMD_DRIVER_STOP
);
1232 r8168ep_ocp_write(tp
, 0x01, 0x30,
1233 r8168ep_ocp_read(tp
, 0x01, 0x30) | 0x01);
1234 rtl_msleep_loop_wait_low(tp
, &rtl_ep_ocp_read_cond
, 10, 10);
1237 static void rtl8168_driver_stop(struct rtl8169_private
*tp
)
1239 switch (tp
->mac_version
) {
1240 case RTL_GIGA_MAC_VER_27
:
1241 case RTL_GIGA_MAC_VER_28
:
1242 case RTL_GIGA_MAC_VER_31
:
1243 rtl8168dp_driver_stop(tp
);
1245 case RTL_GIGA_MAC_VER_49
... RTL_GIGA_MAC_VER_52
:
1246 rtl8168ep_driver_stop(tp
);
1254 static bool r8168dp_check_dash(struct rtl8169_private
*tp
)
1256 u16 reg
= rtl8168_get_ocp_reg(tp
);
1258 return !!(r8168dp_ocp_read(tp
, 0x0f, reg
) & 0x00008000);
1261 static bool r8168ep_check_dash(struct rtl8169_private
*tp
)
1263 return !!(r8168ep_ocp_read(tp
, 0x0f, 0x128) & 0x00000001);
1266 static bool r8168_check_dash(struct rtl8169_private
*tp
)
1268 switch (tp
->mac_version
) {
1269 case RTL_GIGA_MAC_VER_27
:
1270 case RTL_GIGA_MAC_VER_28
:
1271 case RTL_GIGA_MAC_VER_31
:
1272 return r8168dp_check_dash(tp
);
1273 case RTL_GIGA_MAC_VER_49
... RTL_GIGA_MAC_VER_52
:
1274 return r8168ep_check_dash(tp
);
1280 static void rtl_reset_packet_filter(struct rtl8169_private
*tp
)
1282 rtl_eri_clear_bits(tp
, 0xdc, ERIAR_MASK_0001
, BIT(0));
1283 rtl_eri_set_bits(tp
, 0xdc, ERIAR_MASK_0001
, BIT(0));
1286 DECLARE_RTL_COND(rtl_efusear_cond
)
1288 return RTL_R32(tp
, EFUSEAR
) & EFUSEAR_FLAG
;
1291 u8
rtl8168d_efuse_read(struct rtl8169_private
*tp
, int reg_addr
)
1293 RTL_W32(tp
, EFUSEAR
, (reg_addr
& EFUSEAR_REG_MASK
) << EFUSEAR_REG_SHIFT
);
1295 return rtl_udelay_loop_wait_high(tp
, &rtl_efusear_cond
, 100, 300) ?
1296 RTL_R32(tp
, EFUSEAR
) & EFUSEAR_DATA_MASK
: ~0;
1299 static u32
rtl_get_events(struct rtl8169_private
*tp
)
1301 if (rtl_is_8125(tp
))
1302 return RTL_R32(tp
, IntrStatus_8125
);
1304 return RTL_R16(tp
, IntrStatus
);
1307 static void rtl_ack_events(struct rtl8169_private
*tp
, u32 bits
)
1309 if (rtl_is_8125(tp
))
1310 RTL_W32(tp
, IntrStatus_8125
, bits
);
1312 RTL_W16(tp
, IntrStatus
, bits
);
1315 static void rtl_irq_disable(struct rtl8169_private
*tp
)
1317 if (rtl_is_8125(tp
))
1318 RTL_W32(tp
, IntrMask_8125
, 0);
1320 RTL_W16(tp
, IntrMask
, 0);
1321 tp
->irq_enabled
= 0;
1324 static void rtl_irq_enable(struct rtl8169_private
*tp
)
1326 tp
->irq_enabled
= 1;
1327 if (rtl_is_8125(tp
))
1328 RTL_W32(tp
, IntrMask_8125
, tp
->irq_mask
);
1330 RTL_W16(tp
, IntrMask
, tp
->irq_mask
);
1333 static void rtl8169_irq_mask_and_ack(struct rtl8169_private
*tp
)
1335 rtl_irq_disable(tp
);
1336 rtl_ack_events(tp
, 0xffffffff);
1340 static void rtl_link_chg_patch(struct rtl8169_private
*tp
)
1342 struct phy_device
*phydev
= tp
->phydev
;
1344 if (tp
->mac_version
== RTL_GIGA_MAC_VER_34
||
1345 tp
->mac_version
== RTL_GIGA_MAC_VER_38
) {
1346 if (phydev
->speed
== SPEED_1000
) {
1347 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011);
1348 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1349 } else if (phydev
->speed
== SPEED_100
) {
1350 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1351 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1353 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1354 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f);
1356 rtl_reset_packet_filter(tp
);
1357 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_35
||
1358 tp
->mac_version
== RTL_GIGA_MAC_VER_36
) {
1359 if (phydev
->speed
== SPEED_1000
) {
1360 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x00000011);
1361 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x00000005);
1363 rtl_eri_write(tp
, 0x1bc, ERIAR_MASK_1111
, 0x0000001f);
1364 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_1111
, 0x0000003f);
1366 } else if (tp
->mac_version
== RTL_GIGA_MAC_VER_37
) {
1367 if (phydev
->speed
== SPEED_10
) {
1368 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x4d02);
1369 rtl_eri_write(tp
, 0x1dc, ERIAR_MASK_0011
, 0x0060a);
1371 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000);
1376 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1378 static void rtl8169_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1380 struct rtl8169_private
*tp
= netdev_priv(dev
);
1383 wol
->supported
= WAKE_ANY
;
1384 wol
->wolopts
= tp
->saved_wolopts
;
1385 rtl_unlock_work(tp
);
1388 static void __rtl8169_set_wol(struct rtl8169_private
*tp
, u32 wolopts
)
1390 static const struct {
1395 { WAKE_PHY
, Config3
, LinkUp
},
1396 { WAKE_UCAST
, Config5
, UWF
},
1397 { WAKE_BCAST
, Config5
, BWF
},
1398 { WAKE_MCAST
, Config5
, MWF
},
1399 { WAKE_ANY
, Config5
, LanWake
},
1400 { WAKE_MAGIC
, Config3
, MagicPacket
}
1402 unsigned int i
, tmp
= ARRAY_SIZE(cfg
);
1405 rtl_unlock_config_regs(tp
);
1407 if (rtl_is_8168evl_up(tp
)) {
1409 if (wolopts
& WAKE_MAGIC
)
1410 rtl_eri_set_bits(tp
, 0x0dc, ERIAR_MASK_0100
,
1413 rtl_eri_clear_bits(tp
, 0x0dc, ERIAR_MASK_0100
,
1415 } else if (rtl_is_8125(tp
)) {
1417 if (wolopts
& WAKE_MAGIC
)
1418 r8168_mac_ocp_modify(tp
, 0xc0b6, 0, BIT(0));
1420 r8168_mac_ocp_modify(tp
, 0xc0b6, BIT(0), 0);
1423 for (i
= 0; i
< tmp
; i
++) {
1424 options
= RTL_R8(tp
, cfg
[i
].reg
) & ~cfg
[i
].mask
;
1425 if (wolopts
& cfg
[i
].opt
)
1426 options
|= cfg
[i
].mask
;
1427 RTL_W8(tp
, cfg
[i
].reg
, options
);
1430 switch (tp
->mac_version
) {
1431 case RTL_GIGA_MAC_VER_02
... RTL_GIGA_MAC_VER_06
:
1432 options
= RTL_R8(tp
, Config1
) & ~PMEnable
;
1434 options
|= PMEnable
;
1435 RTL_W8(tp
, Config1
, options
);
1437 case RTL_GIGA_MAC_VER_34
:
1438 case RTL_GIGA_MAC_VER_37
:
1439 case RTL_GIGA_MAC_VER_39
... RTL_GIGA_MAC_VER_52
:
1440 options
= RTL_R8(tp
, Config2
) & ~PME_SIGNAL
;
1442 options
|= PME_SIGNAL
;
1443 RTL_W8(tp
, Config2
, options
);
1449 rtl_lock_config_regs(tp
);
1451 device_set_wakeup_enable(tp_to_dev(tp
), wolopts
);
1452 tp
->dev
->wol_enabled
= wolopts
? 1 : 0;
1455 static int rtl8169_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1457 struct rtl8169_private
*tp
= netdev_priv(dev
);
1458 struct device
*d
= tp_to_dev(tp
);
1460 if (wol
->wolopts
& ~WAKE_ANY
)
1463 pm_runtime_get_noresume(d
);
1467 tp
->saved_wolopts
= wol
->wolopts
;
1469 if (pm_runtime_active(d
))
1470 __rtl8169_set_wol(tp
, tp
->saved_wolopts
);
1472 rtl_unlock_work(tp
);
1474 pm_runtime_put_noidle(d
);
1479 static void rtl8169_get_drvinfo(struct net_device
*dev
,
1480 struct ethtool_drvinfo
*info
)
1482 struct rtl8169_private
*tp
= netdev_priv(dev
);
1483 struct rtl_fw
*rtl_fw
= tp
->rtl_fw
;
1485 strlcpy(info
->driver
, MODULENAME
, sizeof(info
->driver
));
1486 strlcpy(info
->bus_info
, pci_name(tp
->pci_dev
), sizeof(info
->bus_info
));
1487 BUILD_BUG_ON(sizeof(info
->fw_version
) < sizeof(rtl_fw
->version
));
1489 strlcpy(info
->fw_version
, rtl_fw
->version
,
1490 sizeof(info
->fw_version
));
1493 static int rtl8169_get_regs_len(struct net_device
*dev
)
1495 return R8169_REGS_SIZE
;
1498 static netdev_features_t
rtl8169_fix_features(struct net_device
*dev
,
1499 netdev_features_t features
)
1501 struct rtl8169_private
*tp
= netdev_priv(dev
);
1503 if (dev
->mtu
> TD_MSS_MAX
)
1504 features
&= ~NETIF_F_ALL_TSO
;
1506 if (dev
->mtu
> ETH_DATA_LEN
&&
1507 tp
->mac_version
> RTL_GIGA_MAC_VER_06
)
1508 features
&= ~(NETIF_F_CSUM_MASK
| NETIF_F_ALL_TSO
);
1513 static int rtl8169_set_features(struct net_device
*dev
,
1514 netdev_features_t features
)
1516 struct rtl8169_private
*tp
= netdev_priv(dev
);
1521 rx_config
= RTL_R32(tp
, RxConfig
);
1522 if (features
& NETIF_F_RXALL
)
1523 rx_config
|= (AcceptErr
| AcceptRunt
);
1525 rx_config
&= ~(AcceptErr
| AcceptRunt
);
1527 if (rtl_is_8125(tp
)) {
1528 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1529 rx_config
|= RX_VLAN_8125
;
1531 rx_config
&= ~RX_VLAN_8125
;
1534 RTL_W32(tp
, RxConfig
, rx_config
);
1536 if (features
& NETIF_F_RXCSUM
)
1537 tp
->cp_cmd
|= RxChkSum
;
1539 tp
->cp_cmd
&= ~RxChkSum
;
1541 if (!rtl_is_8125(tp
)) {
1542 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1543 tp
->cp_cmd
|= RxVlan
;
1545 tp
->cp_cmd
&= ~RxVlan
;
1548 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
1551 rtl_unlock_work(tp
);
1556 static inline u32
rtl8169_tx_vlan_tag(struct sk_buff
*skb
)
1558 return (skb_vlan_tag_present(skb
)) ?
1559 TxVlanTag
| swab16(skb_vlan_tag_get(skb
)) : 0x00;
1562 static void rtl8169_rx_vlan_tag(struct RxDesc
*desc
, struct sk_buff
*skb
)
1564 u32 opts2
= le32_to_cpu(desc
->opts2
);
1566 if (opts2
& RxVlanTag
)
1567 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), swab16(opts2
& 0xffff));
1570 static void rtl8169_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
1573 struct rtl8169_private
*tp
= netdev_priv(dev
);
1574 u32 __iomem
*data
= tp
->mmio_addr
;
1579 for (i
= 0; i
< R8169_REGS_SIZE
; i
+= 4)
1580 memcpy_fromio(dw
++, data
++, 4);
1581 rtl_unlock_work(tp
);
1584 static u32
rtl8169_get_msglevel(struct net_device
*dev
)
1586 struct rtl8169_private
*tp
= netdev_priv(dev
);
1588 return tp
->msg_enable
;
1591 static void rtl8169_set_msglevel(struct net_device
*dev
, u32 value
)
1593 struct rtl8169_private
*tp
= netdev_priv(dev
);
1595 tp
->msg_enable
= value
;
1598 static const char rtl8169_gstrings
[][ETH_GSTRING_LEN
] = {
1605 "tx_single_collisions",
1606 "tx_multi_collisions",
1614 static int rtl8169_get_sset_count(struct net_device
*dev
, int sset
)
1618 return ARRAY_SIZE(rtl8169_gstrings
);
1624 DECLARE_RTL_COND(rtl_counters_cond
)
1626 return RTL_R32(tp
, CounterAddrLow
) & (CounterReset
| CounterDump
);
1629 static bool rtl8169_do_counters(struct rtl8169_private
*tp
, u32 counter_cmd
)
1631 dma_addr_t paddr
= tp
->counters_phys_addr
;
1634 RTL_W32(tp
, CounterAddrHigh
, (u64
)paddr
>> 32);
1636 cmd
= (u64
)paddr
& DMA_BIT_MASK(32);
1637 RTL_W32(tp
, CounterAddrLow
, cmd
);
1638 RTL_W32(tp
, CounterAddrLow
, cmd
| counter_cmd
);
1640 return rtl_udelay_loop_wait_low(tp
, &rtl_counters_cond
, 10, 1000);
1643 static bool rtl8169_reset_counters(struct rtl8169_private
*tp
)
1646 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
1649 if (tp
->mac_version
< RTL_GIGA_MAC_VER_19
)
1652 return rtl8169_do_counters(tp
, CounterReset
);
1655 static bool rtl8169_update_counters(struct rtl8169_private
*tp
)
1657 u8 val
= RTL_R8(tp
, ChipCmd
);
1660 * Some chips are unable to dump tally counters when the receiver
1661 * is disabled. If 0xff chip may be in a PCI power-save state.
1663 if (!(val
& CmdRxEnb
) || val
== 0xff)
1666 return rtl8169_do_counters(tp
, CounterDump
);
1669 static bool rtl8169_init_counter_offsets(struct rtl8169_private
*tp
)
1671 struct rtl8169_counters
*counters
= tp
->counters
;
1675 * rtl8169_init_counter_offsets is called from rtl_open. On chip
1676 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1677 * reset by a power cycle, while the counter values collected by the
1678 * driver are reset at every driver unload/load cycle.
1680 * To make sure the HW values returned by @get_stats64 match the SW
1681 * values, we collect the initial values at first open(*) and use them
1682 * as offsets to normalize the values returned by @get_stats64.
1684 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1685 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1686 * set at open time by rtl_hw_start.
1689 if (tp
->tc_offset
.inited
)
1692 /* If both, reset and update fail, propagate to caller. */
1693 if (rtl8169_reset_counters(tp
))
1696 if (rtl8169_update_counters(tp
))
1699 tp
->tc_offset
.tx_errors
= counters
->tx_errors
;
1700 tp
->tc_offset
.tx_multi_collision
= counters
->tx_multi_collision
;
1701 tp
->tc_offset
.tx_aborted
= counters
->tx_aborted
;
1702 tp
->tc_offset
.rx_missed
= counters
->rx_missed
;
1703 tp
->tc_offset
.inited
= true;
1708 static void rtl8169_get_ethtool_stats(struct net_device
*dev
,
1709 struct ethtool_stats
*stats
, u64
*data
)
1711 struct rtl8169_private
*tp
= netdev_priv(dev
);
1712 struct device
*d
= tp_to_dev(tp
);
1713 struct rtl8169_counters
*counters
= tp
->counters
;
1717 pm_runtime_get_noresume(d
);
1719 if (pm_runtime_active(d
))
1720 rtl8169_update_counters(tp
);
1722 pm_runtime_put_noidle(d
);
1724 data
[0] = le64_to_cpu(counters
->tx_packets
);
1725 data
[1] = le64_to_cpu(counters
->rx_packets
);
1726 data
[2] = le64_to_cpu(counters
->tx_errors
);
1727 data
[3] = le32_to_cpu(counters
->rx_errors
);
1728 data
[4] = le16_to_cpu(counters
->rx_missed
);
1729 data
[5] = le16_to_cpu(counters
->align_errors
);
1730 data
[6] = le32_to_cpu(counters
->tx_one_collision
);
1731 data
[7] = le32_to_cpu(counters
->tx_multi_collision
);
1732 data
[8] = le64_to_cpu(counters
->rx_unicast
);
1733 data
[9] = le64_to_cpu(counters
->rx_broadcast
);
1734 data
[10] = le32_to_cpu(counters
->rx_multicast
);
1735 data
[11] = le16_to_cpu(counters
->tx_aborted
);
1736 data
[12] = le16_to_cpu(counters
->tx_underun
);
1739 static void rtl8169_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
1743 memcpy(data
, *rtl8169_gstrings
, sizeof(rtl8169_gstrings
));
/*
 * Interrupt coalescing
 *
 * > 1 - the availability of the IntrMitigate (0xe2) register through the
 * > 8169, 8168 and 810x line of chipsets
 *
 * 8169, 8168, and 8136(810x) serial chipsets support it.
 *
 * > 2 - the Tx timer unit at gigabit speed
 *
 * The unit of the timer depends on both the speed and the setting of CPlusCmd
 * (0xe0) bit 1 and bit 0.
 *
 * For 8169
 * bit[1:0] \ speed        1000M           100M            10M
 * 0 0                     320ns           2.56us          40.96us
 * 0 1                     2.56us          20.48us         327.7us
 * 1 0                     5.12us          40.96us         655.4us
 * 1 1                     10.24us         81.92us         1.31ms
 *
 * For the other
 * bit[1:0] \ speed        1000M           100M            10M
 * 0 0                     5us             2.56us          40.96us
 * 0 1                     40us            20.48us         327.7us
 * 1 0                     80us            40.96us         655.4us
 * 1 1                     160us           81.92us         1.31ms
 */
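/*
 * Worked example (comment added for clarity, not part of the original
 * driver): on an 8169 at 1000M with CPlusCmd[1:0] == 00 the timer ticks
 * every 320ns, so the maximum IntrMitigate timer field of 0xf delays an
 * interrupt by at most 15 * 320ns = 4.8us. The tables below encode the same
 * "00" base values in nanoseconds (320/2560/40960), scaled by 1, 8, 8*2 and
 * 8*2*2 for the four CPlusCmd[1:0] settings.
 */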
/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
struct rtl_coalesce_scale {
	/* Rx / Tx */
	u32 nsecs[2];
};

/* rx/tx scale factors for all CPlusCmd[0:1] cases */
struct rtl_coalesce_info {
	int speed;
	struct rtl_coalesce_scale scalev[4];	/* each CPlusCmd[0:1] case */
};

/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
#define rxtx_x1822(r, t) {		\
	{{(r),		(t)}},		\
	{{(r)*8,	(t)*8}},	\
	{{(r)*8*2,	(t)*8*2}},	\
	{{(r)*8*2*2,	(t)*8*2*2}},	\
}
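/*
 * Illustrative expansion (comment added for clarity, not part of the
 * original driver): rxtx_x1822(320, 320) yields the four {rx, tx} pairs
 * {320, 320}, {2560, 2560}, {5120, 5120} and {10240, 10240} ns, matching
 * the 8169 gigabit column of the timer-unit table above.
 */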
static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
	/* speed	delays:     rx00   tx00	*/
	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
	{ SPEED_1000,	rxtx_x1822(  320,   320)	},
	{ 0 },
};

static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
	/* speed	delays:     rx00   tx00	*/
	{ SPEED_10,	rxtx_x1822(40960, 40960)	},
	{ SPEED_100,	rxtx_x1822( 2560,  2560)	},
	{ SPEED_1000,	rxtx_x1822( 5000,  5000)	},
	{ 0 },
};
1812 /* get rx/tx scale vector corresponding to current speed */
1813 static const struct rtl_coalesce_info
*rtl_coalesce_info(struct net_device
*dev
)
1815 struct rtl8169_private
*tp
= netdev_priv(dev
);
1816 const struct rtl_coalesce_info
*ci
;
1818 if (tp
->mac_version
<= RTL_GIGA_MAC_VER_06
)
1819 ci
= rtl_coalesce_info_8169
;
1821 ci
= rtl_coalesce_info_8168_8136
;
1823 for (; ci
->speed
; ci
++) {
1824 if (tp
->phydev
->speed
== ci
->speed
)
1828 return ERR_PTR(-ELNRNG
);
1831 static int rtl_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
1833 struct rtl8169_private
*tp
= netdev_priv(dev
);
1834 const struct rtl_coalesce_info
*ci
;
1835 const struct rtl_coalesce_scale
*scale
;
1839 } coal_settings
[] = {
1840 { &ec
->rx_max_coalesced_frames
, &ec
->rx_coalesce_usecs
},
1841 { &ec
->tx_max_coalesced_frames
, &ec
->tx_coalesce_usecs
}
1842 }, *p
= coal_settings
;
1846 if (rtl_is_8125(tp
))
1849 memset(ec
, 0, sizeof(*ec
));
1851 /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1852 ci
= rtl_coalesce_info(dev
);
1856 scale
= &ci
->scalev
[tp
->cp_cmd
& INTT_MASK
];
1858 /* read IntrMitigate and adjust according to scale */
1859 for (w
= RTL_R16(tp
, IntrMitigate
); w
; w
>>= RTL_COALESCE_SHIFT
, p
++) {
1860 *p
->max_frames
= (w
& RTL_COALESCE_MASK
) << 2;
1861 w
>>= RTL_COALESCE_SHIFT
;
1862 *p
->usecs
= w
& RTL_COALESCE_MASK
;
1865 for (i
= 0; i
< 2; i
++) {
1866 p
= coal_settings
+ i
;
1867 *p
->usecs
= (*p
->usecs
* scale
->nsecs
[i
]) / 1000;
1870 * ethtool_coalesce says it is illegal to set both usecs and
1873 if (!*p
->usecs
&& !*p
->max_frames
)
1880 /* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
1881 static const struct rtl_coalesce_scale
*rtl_coalesce_choose_scale(
1882 struct net_device
*dev
, u32 nsec
, u16
*cp01
)
1884 const struct rtl_coalesce_info
*ci
;
1887 ci
= rtl_coalesce_info(dev
);
1889 return ERR_CAST(ci
);
1891 for (i
= 0; i
< 4; i
++) {
1892 u32 rxtx_maxscale
= max(ci
->scalev
[i
].nsecs
[0],
1893 ci
->scalev
[i
].nsecs
[1]);
1894 if (nsec
<= rxtx_maxscale
* RTL_COALESCE_T_MAX
) {
1896 return &ci
->scalev
[i
];
1900 return ERR_PTR(-EINVAL
);
1903 static int rtl_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
1905 struct rtl8169_private
*tp
= netdev_priv(dev
);
1906 const struct rtl_coalesce_scale
*scale
;
1910 } coal_settings
[] = {
1911 { ec
->rx_max_coalesced_frames
, ec
->rx_coalesce_usecs
},
1912 { ec
->tx_max_coalesced_frames
, ec
->tx_coalesce_usecs
}
1913 }, *p
= coal_settings
;
1917 if (rtl_is_8125(tp
))
1920 scale
= rtl_coalesce_choose_scale(dev
,
1921 max(p
[0].usecs
, p
[1].usecs
) * 1000, &cp01
);
1923 return PTR_ERR(scale
);
1925 for (i
= 0; i
< 2; i
++, p
++) {
1929 * accept max_frames=1 we returned in rtl_get_coalesce.
1930 * accept it not only when usecs=0 because of e.g. the following scenario:
1932 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
1933 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
1934 * - then user does `ethtool -C eth0 rx-usecs 100`
1936 * since ethtool sends to kernel whole ethtool_coalesce
1937 * settings, if we do not handle rx_usecs=!0, rx_frames=1
1938 * we'll reject it below in `frames % 4 != 0`.
1940 if (p
->frames
== 1) {
1944 units
= p
->usecs
* 1000 / scale
->nsecs
[i
];
1945 if (p
->frames
> RTL_COALESCE_FRAME_MAX
|| p
->frames
% 4)
1948 w
<<= RTL_COALESCE_SHIFT
;
1950 w
<<= RTL_COALESCE_SHIFT
;
1951 w
|= p
->frames
>> 2;
1956 RTL_W16(tp
, IntrMitigate
, swab16(w
));
1958 tp
->cp_cmd
= (tp
->cp_cmd
& ~INTT_MASK
) | cp01
;
1959 RTL_W16(tp
, CPlusCmd
, tp
->cp_cmd
);
1962 rtl_unlock_work(tp
);
1967 static int rtl8169_get_eee(struct net_device
*dev
, struct ethtool_eee
*data
)
1969 struct rtl8169_private
*tp
= netdev_priv(dev
);
1970 struct device
*d
= tp_to_dev(tp
);
1973 if (!rtl_supports_eee(tp
))
1976 pm_runtime_get_noresume(d
);
1978 if (!pm_runtime_active(d
)) {
1981 ret
= phy_ethtool_get_eee(tp
->phydev
, data
);
1984 pm_runtime_put_noidle(d
);
1989 static int rtl8169_set_eee(struct net_device
*dev
, struct ethtool_eee
*data
)
1991 struct rtl8169_private
*tp
= netdev_priv(dev
);
1992 struct device
*d
= tp_to_dev(tp
);
1995 if (!rtl_supports_eee(tp
))
1998 pm_runtime_get_noresume(d
);
2000 if (!pm_runtime_active(d
)) {
2005 if (dev
->phydev
->autoneg
== AUTONEG_DISABLE
||
2006 dev
->phydev
->duplex
!= DUPLEX_FULL
) {
2007 ret
= -EPROTONOSUPPORT
;
2011 ret
= phy_ethtool_set_eee(tp
->phydev
, data
);
2014 tp
->eee_adv
= phy_read_mmd(dev
->phydev
, MDIO_MMD_AN
,
2017 pm_runtime_put_noidle(d
);
2021 static const struct ethtool_ops rtl8169_ethtool_ops
= {
2022 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
2023 ETHTOOL_COALESCE_MAX_FRAMES
,
2024 .get_drvinfo
= rtl8169_get_drvinfo
,
2025 .get_regs_len
= rtl8169_get_regs_len
,
2026 .get_link
= ethtool_op_get_link
,
2027 .get_coalesce
= rtl_get_coalesce
,
2028 .set_coalesce
= rtl_set_coalesce
,
2029 .get_msglevel
= rtl8169_get_msglevel
,
2030 .set_msglevel
= rtl8169_set_msglevel
,
2031 .get_regs
= rtl8169_get_regs
,
2032 .get_wol
= rtl8169_get_wol
,
2033 .set_wol
= rtl8169_set_wol
,
2034 .get_strings
= rtl8169_get_strings
,
2035 .get_sset_count
= rtl8169_get_sset_count
,
2036 .get_ethtool_stats
= rtl8169_get_ethtool_stats
,
2037 .get_ts_info
= ethtool_op_get_ts_info
,
2038 .nway_reset
= phy_ethtool_nway_reset
,
2039 .get_eee
= rtl8169_get_eee
,
2040 .set_eee
= rtl8169_set_eee
,
2041 .get_link_ksettings
= phy_ethtool_get_link_ksettings
,
2042 .set_link_ksettings
= phy_ethtool_set_link_ksettings
,
2045 static void rtl_enable_eee(struct rtl8169_private
*tp
)
2047 struct phy_device
*phydev
= tp
->phydev
;
2050 /* respect EEE advertisement the user may have set */
2051 if (tp
->eee_adv
>= 0)
2054 adv
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, MDIO_PCS_EEE_ABLE
);
2057 phy_write_mmd(phydev
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, adv
);
2060 static enum mac_version
rtl8169_get_mac_version(u16 xid
, bool gmii
)
2063 * The driver currently handles the 8168Bf and the 8168Be identically
2064 * but they can be identified more specifically through the test below
2067 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2069 * Same thing for the 8101Eb and the 8101Ec:
2071 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2073 static const struct rtl_mac_info
{
2076 enum mac_version ver
;
2079 { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60
},
2080 { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61
},
2083 { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52
},
2085 /* 8168EP family. */
2086 { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51
},
2087 { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50
},
2088 { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49
},
2091 { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46
},
2092 { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45
},
2095 { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44
},
2096 { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42
},
2097 { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41
},
2098 { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40
},
2101 { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38
},
2102 { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36
},
2103 { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35
},
2106 { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34
},
2107 { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32
},
2108 { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33
},
2111 { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25
},
2112 { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26
},
2114 /* 8168DP family. */
2115 { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27
},
2116 { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28
},
2117 { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31
},
2120 { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23
},
2121 { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18
},
2122 { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24
},
2123 { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19
},
2124 { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20
},
2125 { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21
},
2126 { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22
},
2129 { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12
},
2130 { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17
},
2131 { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11
},
2134 { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39
},
2135 { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37
},
2136 { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29
},
2137 { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30
},
2138 { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08
},
2139 { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08
},
2140 { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07
},
2141 { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07
},
2142 { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13
},
2143 /* RTL8401, reportedly works if treated as RTL8101e */
2144 { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13
},
2145 { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10
},
2146 { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16
},
2147 { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09
},
2148 { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09
},
2149 { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16
},
2150 /* FIXME: where did these entries come from ? -- FR */
2151 { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15
},
2152 { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14
},
2155 { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06
},
2156 { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05
},
2157 { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04
},
2158 { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03
},
2159 { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02
},
2162 { 0x000, 0x000, RTL_GIGA_MAC_NONE
}
2164 const struct rtl_mac_info
*p
= mac_info
;
2165 enum mac_version ver
;
2167 while ((xid
& p
->mask
) != p
->val
)
2171 if (ver
!= RTL_GIGA_MAC_NONE
&& !gmii
) {
2172 if (ver
== RTL_GIGA_MAC_VER_42
)
2173 ver
= RTL_GIGA_MAC_VER_43
;
2174 else if (ver
== RTL_GIGA_MAC_VER_45
)
2175 ver
= RTL_GIGA_MAC_VER_47
;
2176 else if (ver
== RTL_GIGA_MAC_VER_46
)
2177 ver
= RTL_GIGA_MAC_VER_48
;
2183 static void rtl_release_firmware(struct rtl8169_private
*tp
)
2186 rtl_fw_release_firmware(tp
->rtl_fw
);
2192 void r8169_apply_firmware(struct rtl8169_private
*tp
)
2194 /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
2196 rtl_fw_write_firmware(tp
, tp
->rtl_fw
);
2197 /* At least one firmware doesn't reset tp->ocp_base. */
2198 tp
->ocp_base
= OCP_STD_PHY_BASE
;
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
	/* Adjust EEE LED frequency */
	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);

	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}

static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
{
	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
	r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};

	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
static u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
{
	u16 data1, data2, ioffset;

	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
	data1 = r8168_mac_ocp_read(tp, 0xdd02);
	data2 = r8168_mac_ocp_read(tp, 0xdd00);

	ioffset = (data2 >> 1) & 0x7ff8;
	ioffset |= data2 & 0x0007;
	if (data1 & BIT(7))
		ioffset |= BIT(15);

	return ioffset;
}
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
	set_bit(flag, tp->wk.flags);
	schedule_work(&tp->wk.work);
}
static void rtl8169_init_phy(struct rtl8169_private *tp)
{
	r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
		/* set undocumented MAC Reg C+CR Offset 0x82h */
		RTL_W8(tp, 0x82, 0x01);
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
	    tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
	    tp->pci_dev->subsystem_device == 0xe000)
		phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);

	/* We may have called phy_speed_down before */
	phy_speed_up(tp->phydev);

	if (rtl_supports_eee(tp))
		rtl_enable_eee(tp);

	genphy_soft_reset(tp->phydev);
}
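
/* Program the unicast MAC address into the MAC0/MAC4 registers; RTL8168E-VL
 * (RTL_GIGA_MAC_VER_34) additionally mirrors it into ERI registers via
 * rtl_rar_exgmac_set().
 */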
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	rtl_lock_work(tp);

	rtl_unlock_config_regs(tp);

	RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);

	RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	rtl_lock_config_regs(tp);

	rtl_unlock_work(tp);
}
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = tp_to_dev(tp);
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	pm_runtime_get_noresume(d);

	if (pm_runtime_active(d))
		rtl_rar_set(tp, dev->dev_addr);

	pm_runtime_put_noidle(d);

	return 0;
}
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61:
		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	if (r8168_check_dash(tp))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (device_may_wakeup(tp_to_dev(tp))) {
		phy_speed_down(tp->phydev, false);
		rtl_wol_suspend_quirk(tp);
		return;
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
	case RTL_GIGA_MAC_VER_52:
	case RTL_GIGA_MAC_VER_60:
	case RTL_GIGA_MAC_VER_61:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
		break;
	default:
		break;
	}
}
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
		break;
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
	case RTL_GIGA_MAC_VER_52:
	case RTL_GIGA_MAC_VER_60:
	case RTL_GIGA_MAC_VER_61:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
		rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
		break;
	default:
		break;
	}

	phy_resume(tp->phydev);
	/* give MAC/PHY some time to resume */
	msleep(20);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
		break;
	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
			RX_DMA_BURST);
		break;
	default:
		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
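
/* Per-family jumbo frame helpers: they toggle the Jumbo_En bits in
 * Config3/Config4 (and, for 8168e, MaxTxPacketSize) and are dispatched
 * from rtl_jumbo_config() below.
 */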
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
}

static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
}

static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
}

static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
}

static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, MaxTxPacketSize, 0x3f);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
}

static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, MaxTxPacketSize, 0x0c);
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
}

static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}

static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;

	rtl_unlock_config_regs(tp);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		if (jumbo) {
			pcie_set_readrq(tp->pci_dev, 512);
			r8168b_1_hw_jumbo_enable(tp);
		} else {
			r8168b_1_hw_jumbo_disable(tp);
		}
		break;
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
		if (jumbo) {
			pcie_set_readrq(tp->pci_dev, 512);
			r8168c_hw_jumbo_enable(tp);
		} else {
			r8168c_hw_jumbo_disable(tp);
		}
		break;
	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
		if (jumbo)
			r8168dp_hw_jumbo_enable(tp);
		else
			r8168dp_hw_jumbo_disable(tp);
		break;
	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
		if (jumbo) {
			pcie_set_readrq(tp->pci_dev, 512);
			r8168e_hw_jumbo_enable(tp);
		} else {
			r8168e_hw_jumbo_disable(tp);
		}
		break;
	default:
		break;
	}
	rtl_lock_config_regs(tp);

	if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
		pcie_set_readrq(tp->pci_dev, 4096);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	return RTL_R8(tp, ChipCmd) & CmdReset;
}

static void rtl_hw_reset(struct rtl8169_private *tp)
{
	RTL_W8(tp, ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;

	/* firmware loaded already or no firmware available */
	if (tp->rtl_fw || !tp->fw_name)
		return;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw) {
		netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
		return;
	}

	rtl_fw->phy_write = rtl_writephy;
	rtl_fw->phy_read = rtl_readphy;
	rtl_fw->mac_mcu_write = mac_mcu_write;
	rtl_fw->mac_mcu_read = mac_mcu_read;
	rtl_fw->fw_name = tp->fw_name;
	rtl_fw->dev = tp_to_dev(tp);

	if (rtl_fw_request_firmware(rtl_fw))
		kfree(rtl_fw);
	else
		tp->rtl_fw = rtl_fw;
}
static void rtl_rx_close(struct rtl8169_private *tp)
{
	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}

DECLARE_RTL_COND(rtl_npq_cond)
{
	return RTL_R8(tp, TxPoll) & NPQ;
}

DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
		break;
	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
		break;
	default:
		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
		break;
	}

	rtl_hw_reset(tp);
}
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
	u32 val = TX_DMA_BURST << TxDMAShift |
		  InterFrameGap << TxInterFrameGapShift;

	if (rtl_is_8168evl_up(tp))
		val |= TXCFG_AUTO_FIFO;

	RTL_W32(tp, TxConfig, val);
}

static void rtl_set_rx_max_size(struct rtl8169_private *tp)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
}

static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
2649 static void rtl8169_set_magic_reg(struct rtl8169_private
*tp
, unsigned mac_version
)
2653 if (tp
->mac_version
== RTL_GIGA_MAC_VER_05
)
2655 else if (tp
->mac_version
== RTL_GIGA_MAC_VER_06
)
2660 if (RTL_R8(tp
, Config2
) & PCI_Clock_66MHz
)
2663 RTL_W32(tp
, 0x7c, val
);
static void rtl_set_rx_mode(struct net_device *dev)
{
	u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
	/* Multicast hash filter */
	u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 tmp;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode |= AcceptAllPhys;
	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
		   dev->flags & IFF_ALLMULTI ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
		/* accept all multicasts */
	} else if (netdev_mc_empty(dev)) {
		rx_mode &= ~AcceptMulticast;
	} else {
		struct netdev_hw_addr *ha;

		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
		}

		if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
			tmp = mc_filter[0];
			mc_filter[0] = swab32(mc_filter[1]);
			mc_filter[1] = swab32(tmp);
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
	RTL_W32(tp, MAR0 + 0, mc_filter[0]);

	tmp = RTL_R32(tp, RxConfig);
	RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
}
DECLARE_RTL_COND(rtl_csiar_cond)
{
	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
}

static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	u32 func = PCI_FUNC(tp->pci_dev->devfn);

	RTL_W32(tp, CSIDR, value);
	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE | func << 16);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}

static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
	u32 func = PCI_FUNC(tp->pci_dev->devfn);

	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
		CSIAR_BYTE_ENABLE);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(tp, CSIDR) : ~0;
}
static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
{
	struct pci_dev *pdev = tp->pci_dev;
	u32 csi;

	/* According to Realtek the value at config space address 0x070f
	 * controls the L0s/L1 entrance latency. We try standard ECAM access
	 * first and if it fails fall back to CSI.
	 */
	if (pdev->cfg_size > 0x070f &&
	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
		return;

	netdev_notice_once(tp->dev,
		"No native access to PCI extended config space, falling back to CSI\n");
	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
	rtl_csi_write(tp, 0x070c, csi | val << 24);
}

static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27);
}

struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};
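
/* Apply a table of EPHY patches: for each entry the current register value is
 * read, the bits in 'mask' are cleared and the bits in 'bits' are set.
 */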
static void __rtl_ephy_init(struct rtl8169_private *tp,
			    const struct ephy_info *e, int len)
{
	u16 w;

	while (len-- > 0) {
		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
		rtl_ephy_write(tp, e->offset, w);
		e++;
	}
}

#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
static void rtl_disable_clock_request(struct rtl8169_private *tp)
{
	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}

static void rtl_enable_clock_request(struct rtl8169_private *tp)
{
	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}

static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
{
	/* work around an issue when PCI reset occurs during L2/L3 state */
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
}
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
	/* Don't enable ASPM in the chip if OS can't control ASPM */
	if (enable && tp->aspm_manageable) {
		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
	} else {
		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
	}
}
static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
{
	/* Usage of dynamic vs. static FIFO is controlled by bit
	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
	 */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
}

static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
					  u16 low, u16 high)
{
	/* FIFO thresholds for pause flow control */
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}
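
/* Chip-specific hardware start helpers. rtl_hw_config() further down maps
 * each mac_version to one of these routines.
 */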
static void rtl_hw_start_8168b(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}

static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	rtl_disable_clock_request(tp);
}

static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168cp);

	__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}

static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);

	RTL_W8(tp, DBG_REG, 0x20);
}

static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1);

	__rtl_hw_start_8168cp(tp);
}

static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0020 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168c_2);

	__rtl_hw_start_8168cp(tp);
}

static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}

static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	rtl_disable_clock_request(tp);
}

static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168d_4[] = {
		{ 0x0b, 0x0000,	0x0048 },
		{ 0x19, 0x0020,	0x0050 },
		{ 0x0c, 0x0100,	0x0020 },
		{ 0x10, 0x0004,	0x0000 },
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168d_4);

	rtl_enable_clock_request(tp);
}
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168e_1);

	rtl_disable_clock_request(tp);

	/* Reset tx FIFO pointer */
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);

	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
}
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x0c, 0x3df0,	0x0200 },
	};

	rtl_set_def_aspm_entry_latency(tp);

	rtl_ephy_init(tp, e_info_8168e_2);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);

	rtl_disable_clock_request(tp);

	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	rtl8168_config_eee_mac(tp);

	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);

	rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	rtl_set_def_aspm_entry_latency(tp);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
	rtl_reset_packet_filter(tp);
	rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
	rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);

	rtl_disable_clock_request(tp);

	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);

	rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x0c, 0x3df0,	0x0200 },
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1);

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
}

static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x19, 0x0000,	0x0224 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x0c, 0x3df0,	0x0200 },
	};

	rtl_hw_start_8168f(tp);
	rtl_pcie_state_l2l3_disable(tp);

	rtl_ephy_init(tp, e_info_8168f_1);

	rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
}
3069 static void rtl_hw_start_8168g(struct rtl8169_private
*tp
)
3071 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
3072 rtl8168g_set_pause_thresholds(tp
, 0x38, 0x48);
3074 rtl_set_def_aspm_entry_latency(tp
);
3076 rtl_reset_packet_filter(tp
);
3077 rtl_eri_write(tp
, 0x2f8, ERIAR_MASK_0011
, 0x1d8f);
3079 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
3081 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
3082 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
3084 rtl8168_config_eee_mac(tp
);
3086 rtl_w0w1_eri(tp
, 0x2fc, ERIAR_MASK_0001
, 0x01, 0x06);
3087 rtl_eri_clear_bits(tp
, 0x1b0, ERIAR_MASK_0011
, BIT(12));
3089 rtl_pcie_state_l2l3_disable(tp
);
3092 static void rtl_hw_start_8168g_1(struct rtl8169_private
*tp
)
3094 static const struct ephy_info e_info_8168g_1
[] = {
3095 { 0x00, 0x0008, 0x0000 },
3096 { 0x0c, 0x3ff0, 0x0820 },
3097 { 0x1e, 0x0000, 0x0001 },
3098 { 0x19, 0x8000, 0x0000 }
3101 rtl_hw_start_8168g(tp
);
3103 /* disable aspm and clock request before access ephy */
3104 rtl_hw_aspm_clkreq_enable(tp
, false);
3105 rtl_ephy_init(tp
, e_info_8168g_1
);
3106 rtl_hw_aspm_clkreq_enable(tp
, true);
3109 static void rtl_hw_start_8168g_2(struct rtl8169_private
*tp
)
3111 static const struct ephy_info e_info_8168g_2
[] = {
3112 { 0x00, 0x0008, 0x0000 },
3113 { 0x0c, 0x3ff0, 0x0820 },
3114 { 0x19, 0xffff, 0x7c00 },
3115 { 0x1e, 0xffff, 0x20eb },
3116 { 0x0d, 0xffff, 0x1666 },
3117 { 0x00, 0xffff, 0x10a3 },
3118 { 0x06, 0xffff, 0xf050 },
3119 { 0x04, 0x0000, 0x0010 },
3120 { 0x1d, 0x4000, 0x0000 },
3123 rtl_hw_start_8168g(tp
);
3125 /* disable aspm and clock request before access ephy */
3126 rtl_hw_aspm_clkreq_enable(tp
, false);
3127 rtl_ephy_init(tp
, e_info_8168g_2
);
3130 static void rtl_hw_start_8411_2(struct rtl8169_private
*tp
)
3132 static const struct ephy_info e_info_8411_2
[] = {
3133 { 0x00, 0x0008, 0x0000 },
3134 { 0x0c, 0x37d0, 0x0820 },
3135 { 0x1e, 0x0000, 0x0001 },
3136 { 0x19, 0x8021, 0x0000 },
3137 { 0x1e, 0x0000, 0x2000 },
3138 { 0x0d, 0x0100, 0x0200 },
3139 { 0x00, 0x0000, 0x0080 },
3140 { 0x06, 0x0000, 0x0010 },
3141 { 0x04, 0x0000, 0x0010 },
3142 { 0x1d, 0x0000, 0x4000 },
3145 rtl_hw_start_8168g(tp
);
3147 /* disable aspm and clock request before access ephy */
3148 rtl_hw_aspm_clkreq_enable(tp
, false);
3149 rtl_ephy_init(tp
, e_info_8411_2
);
3151 /* The following Realtek-provided magic fixes an issue with the RX unit
3152 * getting confused after the PHY having been powered-down.
3154 r8168_mac_ocp_write(tp
, 0xFC28, 0x0000);
3155 r8168_mac_ocp_write(tp
, 0xFC2A, 0x0000);
3156 r8168_mac_ocp_write(tp
, 0xFC2C, 0x0000);
3157 r8168_mac_ocp_write(tp
, 0xFC2E, 0x0000);
3158 r8168_mac_ocp_write(tp
, 0xFC30, 0x0000);
3159 r8168_mac_ocp_write(tp
, 0xFC32, 0x0000);
3160 r8168_mac_ocp_write(tp
, 0xFC34, 0x0000);
3161 r8168_mac_ocp_write(tp
, 0xFC36, 0x0000);
3163 r8168_mac_ocp_write(tp
, 0xFC26, 0x0000);
3165 r8168_mac_ocp_write(tp
, 0xF800, 0xE008);
3166 r8168_mac_ocp_write(tp
, 0xF802, 0xE00A);
3167 r8168_mac_ocp_write(tp
, 0xF804, 0xE00C);
3168 r8168_mac_ocp_write(tp
, 0xF806, 0xE00E);
3169 r8168_mac_ocp_write(tp
, 0xF808, 0xE027);
3170 r8168_mac_ocp_write(tp
, 0xF80A, 0xE04F);
3171 r8168_mac_ocp_write(tp
, 0xF80C, 0xE05E);
3172 r8168_mac_ocp_write(tp
, 0xF80E, 0xE065);
3173 r8168_mac_ocp_write(tp
, 0xF810, 0xC602);
3174 r8168_mac_ocp_write(tp
, 0xF812, 0xBE00);
3175 r8168_mac_ocp_write(tp
, 0xF814, 0x0000);
3176 r8168_mac_ocp_write(tp
, 0xF816, 0xC502);
3177 r8168_mac_ocp_write(tp
, 0xF818, 0xBD00);
3178 r8168_mac_ocp_write(tp
, 0xF81A, 0x074C);
3179 r8168_mac_ocp_write(tp
, 0xF81C, 0xC302);
3180 r8168_mac_ocp_write(tp
, 0xF81E, 0xBB00);
3181 r8168_mac_ocp_write(tp
, 0xF820, 0x080A);
3182 r8168_mac_ocp_write(tp
, 0xF822, 0x6420);
3183 r8168_mac_ocp_write(tp
, 0xF824, 0x48C2);
3184 r8168_mac_ocp_write(tp
, 0xF826, 0x8C20);
3185 r8168_mac_ocp_write(tp
, 0xF828, 0xC516);
3186 r8168_mac_ocp_write(tp
, 0xF82A, 0x64A4);
3187 r8168_mac_ocp_write(tp
, 0xF82C, 0x49C0);
3188 r8168_mac_ocp_write(tp
, 0xF82E, 0xF009);
3189 r8168_mac_ocp_write(tp
, 0xF830, 0x74A2);
3190 r8168_mac_ocp_write(tp
, 0xF832, 0x8CA5);
3191 r8168_mac_ocp_write(tp
, 0xF834, 0x74A0);
3192 r8168_mac_ocp_write(tp
, 0xF836, 0xC50E);
3193 r8168_mac_ocp_write(tp
, 0xF838, 0x9CA2);
3194 r8168_mac_ocp_write(tp
, 0xF83A, 0x1C11);
3195 r8168_mac_ocp_write(tp
, 0xF83C, 0x9CA0);
3196 r8168_mac_ocp_write(tp
, 0xF83E, 0xE006);
3197 r8168_mac_ocp_write(tp
, 0xF840, 0x74F8);
3198 r8168_mac_ocp_write(tp
, 0xF842, 0x48C4);
3199 r8168_mac_ocp_write(tp
, 0xF844, 0x8CF8);
3200 r8168_mac_ocp_write(tp
, 0xF846, 0xC404);
3201 r8168_mac_ocp_write(tp
, 0xF848, 0xBC00);
3202 r8168_mac_ocp_write(tp
, 0xF84A, 0xC403);
3203 r8168_mac_ocp_write(tp
, 0xF84C, 0xBC00);
3204 r8168_mac_ocp_write(tp
, 0xF84E, 0x0BF2);
3205 r8168_mac_ocp_write(tp
, 0xF850, 0x0C0A);
3206 r8168_mac_ocp_write(tp
, 0xF852, 0xE434);
3207 r8168_mac_ocp_write(tp
, 0xF854, 0xD3C0);
3208 r8168_mac_ocp_write(tp
, 0xF856, 0x49D9);
3209 r8168_mac_ocp_write(tp
, 0xF858, 0xF01F);
3210 r8168_mac_ocp_write(tp
, 0xF85A, 0xC526);
3211 r8168_mac_ocp_write(tp
, 0xF85C, 0x64A5);
3212 r8168_mac_ocp_write(tp
, 0xF85E, 0x1400);
3213 r8168_mac_ocp_write(tp
, 0xF860, 0xF007);
3214 r8168_mac_ocp_write(tp
, 0xF862, 0x0C01);
3215 r8168_mac_ocp_write(tp
, 0xF864, 0x8CA5);
3216 r8168_mac_ocp_write(tp
, 0xF866, 0x1C15);
3217 r8168_mac_ocp_write(tp
, 0xF868, 0xC51B);
3218 r8168_mac_ocp_write(tp
, 0xF86A, 0x9CA0);
3219 r8168_mac_ocp_write(tp
, 0xF86C, 0xE013);
3220 r8168_mac_ocp_write(tp
, 0xF86E, 0xC519);
3221 r8168_mac_ocp_write(tp
, 0xF870, 0x74A0);
3222 r8168_mac_ocp_write(tp
, 0xF872, 0x48C4);
3223 r8168_mac_ocp_write(tp
, 0xF874, 0x8CA0);
3224 r8168_mac_ocp_write(tp
, 0xF876, 0xC516);
3225 r8168_mac_ocp_write(tp
, 0xF878, 0x74A4);
3226 r8168_mac_ocp_write(tp
, 0xF87A, 0x48C8);
3227 r8168_mac_ocp_write(tp
, 0xF87C, 0x48CA);
3228 r8168_mac_ocp_write(tp
, 0xF87E, 0x9CA4);
3229 r8168_mac_ocp_write(tp
, 0xF880, 0xC512);
3230 r8168_mac_ocp_write(tp
, 0xF882, 0x1B00);
3231 r8168_mac_ocp_write(tp
, 0xF884, 0x9BA0);
3232 r8168_mac_ocp_write(tp
, 0xF886, 0x1B1C);
3233 r8168_mac_ocp_write(tp
, 0xF888, 0x483F);
3234 r8168_mac_ocp_write(tp
, 0xF88A, 0x9BA2);
3235 r8168_mac_ocp_write(tp
, 0xF88C, 0x1B04);
3236 r8168_mac_ocp_write(tp
, 0xF88E, 0xC508);
3237 r8168_mac_ocp_write(tp
, 0xF890, 0x9BA0);
3238 r8168_mac_ocp_write(tp
, 0xF892, 0xC505);
3239 r8168_mac_ocp_write(tp
, 0xF894, 0xBD00);
3240 r8168_mac_ocp_write(tp
, 0xF896, 0xC502);
3241 r8168_mac_ocp_write(tp
, 0xF898, 0xBD00);
3242 r8168_mac_ocp_write(tp
, 0xF89A, 0x0300);
3243 r8168_mac_ocp_write(tp
, 0xF89C, 0x051E);
3244 r8168_mac_ocp_write(tp
, 0xF89E, 0xE434);
3245 r8168_mac_ocp_write(tp
, 0xF8A0, 0xE018);
3246 r8168_mac_ocp_write(tp
, 0xF8A2, 0xE092);
3247 r8168_mac_ocp_write(tp
, 0xF8A4, 0xDE20);
3248 r8168_mac_ocp_write(tp
, 0xF8A6, 0xD3C0);
3249 r8168_mac_ocp_write(tp
, 0xF8A8, 0xC50F);
3250 r8168_mac_ocp_write(tp
, 0xF8AA, 0x76A4);
3251 r8168_mac_ocp_write(tp
, 0xF8AC, 0x49E3);
3252 r8168_mac_ocp_write(tp
, 0xF8AE, 0xF007);
3253 r8168_mac_ocp_write(tp
, 0xF8B0, 0x49C0);
3254 r8168_mac_ocp_write(tp
, 0xF8B2, 0xF103);
3255 r8168_mac_ocp_write(tp
, 0xF8B4, 0xC607);
3256 r8168_mac_ocp_write(tp
, 0xF8B6, 0xBE00);
3257 r8168_mac_ocp_write(tp
, 0xF8B8, 0xC606);
3258 r8168_mac_ocp_write(tp
, 0xF8BA, 0xBE00);
3259 r8168_mac_ocp_write(tp
, 0xF8BC, 0xC602);
3260 r8168_mac_ocp_write(tp
, 0xF8BE, 0xBE00);
3261 r8168_mac_ocp_write(tp
, 0xF8C0, 0x0C4C);
3262 r8168_mac_ocp_write(tp
, 0xF8C2, 0x0C28);
3263 r8168_mac_ocp_write(tp
, 0xF8C4, 0x0C2C);
3264 r8168_mac_ocp_write(tp
, 0xF8C6, 0xDC00);
3265 r8168_mac_ocp_write(tp
, 0xF8C8, 0xC707);
3266 r8168_mac_ocp_write(tp
, 0xF8CA, 0x1D00);
3267 r8168_mac_ocp_write(tp
, 0xF8CC, 0x8DE2);
3268 r8168_mac_ocp_write(tp
, 0xF8CE, 0x48C1);
3269 r8168_mac_ocp_write(tp
, 0xF8D0, 0xC502);
3270 r8168_mac_ocp_write(tp
, 0xF8D2, 0xBD00);
3271 r8168_mac_ocp_write(tp
, 0xF8D4, 0x00AA);
3272 r8168_mac_ocp_write(tp
, 0xF8D6, 0xE0C0);
3273 r8168_mac_ocp_write(tp
, 0xF8D8, 0xC502);
3274 r8168_mac_ocp_write(tp
, 0xF8DA, 0xBD00);
3275 r8168_mac_ocp_write(tp
, 0xF8DC, 0x0132);
3277 r8168_mac_ocp_write(tp
, 0xFC26, 0x8000);
3279 r8168_mac_ocp_write(tp
, 0xFC2A, 0x0743);
3280 r8168_mac_ocp_write(tp
, 0xFC2C, 0x0801);
3281 r8168_mac_ocp_write(tp
, 0xFC2E, 0x0BE9);
3282 r8168_mac_ocp_write(tp
, 0xFC30, 0x02FD);
3283 r8168_mac_ocp_write(tp
, 0xFC32, 0x0C25);
3284 r8168_mac_ocp_write(tp
, 0xFC34, 0x00A9);
3285 r8168_mac_ocp_write(tp
, 0xFC36, 0x012D);
3287 rtl_hw_aspm_clkreq_enable(tp
, true);
3290 static void rtl_hw_start_8168h_1(struct rtl8169_private
*tp
)
3292 static const struct ephy_info e_info_8168h_1
[] = {
3293 { 0x1e, 0x0800, 0x0001 },
3294 { 0x1d, 0x0000, 0x0800 },
3295 { 0x05, 0xffff, 0x2089 },
3296 { 0x06, 0xffff, 0x5881 },
3297 { 0x04, 0xffff, 0x854a },
3298 { 0x01, 0xffff, 0x068b }
3302 /* disable aspm and clock request before access ephy */
3303 rtl_hw_aspm_clkreq_enable(tp
, false);
3304 rtl_ephy_init(tp
, e_info_8168h_1
);
3306 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
3307 rtl8168g_set_pause_thresholds(tp
, 0x38, 0x48);
3309 rtl_set_def_aspm_entry_latency(tp
);
3311 rtl_reset_packet_filter(tp
);
3313 rtl_eri_set_bits(tp
, 0xdc, ERIAR_MASK_1111
, BIT(4));
3315 rtl_eri_set_bits(tp
, 0xd4, ERIAR_MASK_1111
, 0x1f00);
3317 rtl_eri_write(tp
, 0x5f0, ERIAR_MASK_0011
, 0x4f87);
3319 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
3321 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
3322 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
3324 rtl8168_config_eee_mac(tp
);
3326 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
3327 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
3329 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~TX_10M_PS_EN
);
3331 rtl_eri_clear_bits(tp
, 0x1b0, ERIAR_MASK_0011
, BIT(12));
3333 rtl_pcie_state_l2l3_disable(tp
);
3335 rg_saw_cnt
= phy_read_paged(tp
->phydev
, 0x0c42, 0x13) & 0x3fff;
3336 if (rg_saw_cnt
> 0) {
3339 sw_cnt_1ms_ini
= 16000000/rg_saw_cnt
;
3340 sw_cnt_1ms_ini
&= 0x0fff;
3341 r8168_mac_ocp_modify(tp
, 0xd412, 0x0fff, sw_cnt_1ms_ini
);
3344 r8168_mac_ocp_modify(tp
, 0xe056, 0x00f0, 0x0070);
3345 r8168_mac_ocp_modify(tp
, 0xe052, 0x6000, 0x8008);
3346 r8168_mac_ocp_modify(tp
, 0xe0d6, 0x01ff, 0x017f);
3347 r8168_mac_ocp_modify(tp
, 0xd420, 0x0fff, 0x047f);
3349 r8168_mac_ocp_write(tp
, 0xe63e, 0x0001);
3350 r8168_mac_ocp_write(tp
, 0xe63e, 0x0000);
3351 r8168_mac_ocp_write(tp
, 0xc094, 0x0000);
3352 r8168_mac_ocp_write(tp
, 0xc09e, 0x0000);
3354 rtl_hw_aspm_clkreq_enable(tp
, true);
3357 static void rtl_hw_start_8168ep(struct rtl8169_private
*tp
)
3359 rtl8168ep_stop_cmac(tp
);
3361 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
3362 rtl8168g_set_pause_thresholds(tp
, 0x2f, 0x5f);
3364 rtl_set_def_aspm_entry_latency(tp
);
3366 rtl_reset_packet_filter(tp
);
3368 rtl_eri_set_bits(tp
, 0xd4, ERIAR_MASK_1111
, 0x1f80);
3370 rtl_eri_write(tp
, 0x5f0, ERIAR_MASK_0011
, 0x4f87);
3372 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
3374 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
3375 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
3377 rtl8168_config_eee_mac(tp
);
3379 rtl_w0w1_eri(tp
, 0x2fc, ERIAR_MASK_0001
, 0x01, 0x06);
3381 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~TX_10M_PS_EN
);
3383 rtl_pcie_state_l2l3_disable(tp
);
3386 static void rtl_hw_start_8168ep_1(struct rtl8169_private
*tp
)
3388 static const struct ephy_info e_info_8168ep_1
[] = {
3389 { 0x00, 0xffff, 0x10ab },
3390 { 0x06, 0xffff, 0xf030 },
3391 { 0x08, 0xffff, 0x2006 },
3392 { 0x0d, 0xffff, 0x1666 },
3393 { 0x0c, 0x3ff0, 0x0000 }
3396 /* disable aspm and clock request before access ephy */
3397 rtl_hw_aspm_clkreq_enable(tp
, false);
3398 rtl_ephy_init(tp
, e_info_8168ep_1
);
3400 rtl_hw_start_8168ep(tp
);
3402 rtl_hw_aspm_clkreq_enable(tp
, true);
3405 static void rtl_hw_start_8168ep_2(struct rtl8169_private
*tp
)
3407 static const struct ephy_info e_info_8168ep_2
[] = {
3408 { 0x00, 0xffff, 0x10a3 },
3409 { 0x19, 0xffff, 0xfc00 },
3410 { 0x1e, 0xffff, 0x20ea }
3413 /* disable aspm and clock request before access ephy */
3414 rtl_hw_aspm_clkreq_enable(tp
, false);
3415 rtl_ephy_init(tp
, e_info_8168ep_2
);
3417 rtl_hw_start_8168ep(tp
);
3419 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
3420 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
3422 rtl_hw_aspm_clkreq_enable(tp
, true);
3425 static void rtl_hw_start_8168ep_3(struct rtl8169_private
*tp
)
3427 static const struct ephy_info e_info_8168ep_3
[] = {
3428 { 0x00, 0x0000, 0x0080 },
3429 { 0x0d, 0x0100, 0x0200 },
3430 { 0x19, 0x8021, 0x0000 },
3431 { 0x1e, 0x0000, 0x2000 },
3434 /* disable aspm and clock request before access ephy */
3435 rtl_hw_aspm_clkreq_enable(tp
, false);
3436 rtl_ephy_init(tp
, e_info_8168ep_3
);
3438 rtl_hw_start_8168ep(tp
);
3440 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
3441 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
3443 r8168_mac_ocp_modify(tp
, 0xd3e2, 0x0fff, 0x0271);
3444 r8168_mac_ocp_modify(tp
, 0xd3e4, 0x00ff, 0x0000);
3445 r8168_mac_ocp_modify(tp
, 0xe860, 0x0000, 0x0080);
3447 rtl_hw_aspm_clkreq_enable(tp
, true);
3450 static void rtl_hw_start_8117(struct rtl8169_private
*tp
)
3452 static const struct ephy_info e_info_8117
[] = {
3453 { 0x19, 0x0040, 0x1100 },
3454 { 0x59, 0x0040, 0x1100 },
3458 rtl8168ep_stop_cmac(tp
);
3460 /* disable aspm and clock request before access ephy */
3461 rtl_hw_aspm_clkreq_enable(tp
, false);
3462 rtl_ephy_init(tp
, e_info_8117
);
3464 rtl_set_fifo_size(tp
, 0x08, 0x10, 0x02, 0x06);
3465 rtl8168g_set_pause_thresholds(tp
, 0x2f, 0x5f);
3467 rtl_set_def_aspm_entry_latency(tp
);
3469 rtl_reset_packet_filter(tp
);
3471 rtl_eri_set_bits(tp
, 0xd4, ERIAR_MASK_1111
, 0x1f90);
3473 rtl_eri_write(tp
, 0x5f0, ERIAR_MASK_0011
, 0x4f87);
3475 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
3477 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
3478 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
3480 rtl8168_config_eee_mac(tp
);
3482 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
3483 RTL_W8(tp
, MISC_1
, RTL_R8(tp
, MISC_1
) & ~PFM_D3COLD_EN
);
3485 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~TX_10M_PS_EN
);
3487 rtl_eri_clear_bits(tp
, 0x1b0, ERIAR_MASK_0011
, BIT(12));
3489 rtl_pcie_state_l2l3_disable(tp
);
3491 rg_saw_cnt
= phy_read_paged(tp
->phydev
, 0x0c42, 0x13) & 0x3fff;
3492 if (rg_saw_cnt
> 0) {
3495 sw_cnt_1ms_ini
= (16000000 / rg_saw_cnt
) & 0x0fff;
3496 r8168_mac_ocp_modify(tp
, 0xd412, 0x0fff, sw_cnt_1ms_ini
);
3499 r8168_mac_ocp_modify(tp
, 0xe056, 0x00f0, 0x0070);
3500 r8168_mac_ocp_write(tp
, 0xea80, 0x0003);
3501 r8168_mac_ocp_modify(tp
, 0xe052, 0x0000, 0x0009);
3502 r8168_mac_ocp_modify(tp
, 0xd420, 0x0fff, 0x047f);
3504 r8168_mac_ocp_write(tp
, 0xe63e, 0x0001);
3505 r8168_mac_ocp_write(tp
, 0xe63e, 0x0000);
3506 r8168_mac_ocp_write(tp
, 0xc094, 0x0000);
3507 r8168_mac_ocp_write(tp
, 0xc09e, 0x0000);
3509 /* firmware is for MAC only */
3510 r8169_apply_firmware(tp
);
3512 rtl_hw_aspm_clkreq_enable(tp
, true);
3515 static void rtl_hw_start_8102e_1(struct rtl8169_private
*tp
)
3517 static const struct ephy_info e_info_8102e_1
[] = {
3518 { 0x01, 0, 0x6e65 },
3519 { 0x02, 0, 0x091f },
3520 { 0x03, 0, 0xc2f9 },
3521 { 0x06, 0, 0xafb5 },
3522 { 0x07, 0, 0x0e00 },
3523 { 0x19, 0, 0xec80 },
3524 { 0x01, 0, 0x2e65 },
3529 rtl_set_def_aspm_entry_latency(tp
);
3531 RTL_W8(tp
, DBG_REG
, FIX_NAK_1
);
3534 LEDS1
| LEDS0
| Speed_down
| MEMMAP
| IOMAP
| VPD
| PMEnable
);
3535 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
3537 cfg1
= RTL_R8(tp
, Config1
);
3538 if ((cfg1
& LEDS0
) && (cfg1
& LEDS1
))
3539 RTL_W8(tp
, Config1
, cfg1
& ~LEDS0
);
3541 rtl_ephy_init(tp
, e_info_8102e_1
);
3544 static void rtl_hw_start_8102e_2(struct rtl8169_private
*tp
)
3546 rtl_set_def_aspm_entry_latency(tp
);
3548 RTL_W8(tp
, Config1
, MEMMAP
| IOMAP
| VPD
| PMEnable
);
3549 RTL_W8(tp
, Config3
, RTL_R8(tp
, Config3
) & ~Beacon_en
);
3552 static void rtl_hw_start_8102e_3(struct rtl8169_private
*tp
)
3554 rtl_hw_start_8102e_2(tp
);
3556 rtl_ephy_write(tp
, 0x03, 0xc2f9);
3559 static void rtl_hw_start_8105e_1(struct rtl8169_private
*tp
)
3561 static const struct ephy_info e_info_8105e_1
[] = {
3562 { 0x07, 0, 0x4000 },
3563 { 0x19, 0, 0x0200 },
3564 { 0x19, 0, 0x0020 },
3565 { 0x1e, 0, 0x2000 },
3566 { 0x03, 0, 0x0001 },
3567 { 0x19, 0, 0x0100 },
3568 { 0x19, 0, 0x0004 },
3572 /* Force LAN exit from ASPM if Rx/Tx are not idle */
3573 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
3575 /* Disable Early Tally Counter */
3576 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) & ~0x010000);
3578 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) | EN_NDP
| EN_OOB_RESET
);
3579 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) | PFM_EN
);
3581 rtl_ephy_init(tp
, e_info_8105e_1
);
3583 rtl_pcie_state_l2l3_disable(tp
);
3586 static void rtl_hw_start_8105e_2(struct rtl8169_private
*tp
)
3588 rtl_hw_start_8105e_1(tp
);
3589 rtl_ephy_write(tp
, 0x1e, rtl_ephy_read(tp
, 0x1e) | 0x8000);
3592 static void rtl_hw_start_8402(struct rtl8169_private
*tp
)
3594 static const struct ephy_info e_info_8402
[] = {
3595 { 0x19, 0xffff, 0xff64 },
3599 rtl_set_def_aspm_entry_latency(tp
);
3601 /* Force LAN exit from ASPM if Rx/Tx are not idle */
3602 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
3604 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) & ~NOW_IS_OOB
);
3606 rtl_ephy_init(tp
, e_info_8402
);
3608 rtl_set_fifo_size(tp
, 0x00, 0x00, 0x02, 0x06);
3609 rtl_reset_packet_filter(tp
);
3610 rtl_eri_write(tp
, 0xc0, ERIAR_MASK_0011
, 0x0000);
3611 rtl_eri_write(tp
, 0xb8, ERIAR_MASK_0011
, 0x0000);
3612 rtl_w0w1_eri(tp
, 0x0d4, ERIAR_MASK_0011
, 0x0e00, 0xff00);
3615 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000);
3617 rtl_pcie_state_l2l3_disable(tp
);
3620 static void rtl_hw_start_8106(struct rtl8169_private
*tp
)
3622 rtl_hw_aspm_clkreq_enable(tp
, false);
3624 /* Force LAN exit from ASPM if Rx/Tx are not idle */
3625 RTL_W32(tp
, FuncEvent
, RTL_R32(tp
, FuncEvent
) | 0x002800);
3627 RTL_W32(tp
, MISC
, (RTL_R32(tp
, MISC
) | DISABLE_LAN_EN
) & ~EARLY_TALLY_EN
);
3628 RTL_W8(tp
, MCU
, RTL_R8(tp
, MCU
) | EN_NDP
| EN_OOB_RESET
);
3629 RTL_W8(tp
, DLLPR
, RTL_R8(tp
, DLLPR
) & ~PFM_EN
);
3631 rtl_eri_write(tp
, 0x1d0, ERIAR_MASK_0011
, 0x0000);
3634 rtl_eri_write(tp
, 0x1b0, ERIAR_MASK_0011
, 0x0000);
3636 rtl_pcie_state_l2l3_disable(tp
);
3637 rtl_hw_aspm_clkreq_enable(tp
, true);
3640 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond
)
3642 return r8168_mac_ocp_read(tp
, 0xe00e) & BIT(13);
3645 static void rtl_hw_start_8125_common(struct rtl8169_private
*tp
)
3647 rtl_pcie_state_l2l3_disable(tp
);
3649 RTL_W16(tp
, 0x382, 0x221b);
3650 RTL_W8(tp
, 0x4500, 0);
3651 RTL_W16(tp
, 0x4800, 0);
3654 r8168_mac_ocp_modify(tp
, 0xd40a, 0x0010, 0x0000);
3656 RTL_W8(tp
, Config1
, RTL_R8(tp
, Config1
) & ~0x10);
3658 r8168_mac_ocp_write(tp
, 0xc140, 0xffff);
3659 r8168_mac_ocp_write(tp
, 0xc142, 0xffff);
3661 r8168_mac_ocp_modify(tp
, 0xd3e2, 0x0fff, 0x03a9);
3662 r8168_mac_ocp_modify(tp
, 0xd3e4, 0x00ff, 0x0000);
3663 r8168_mac_ocp_modify(tp
, 0xe860, 0x0000, 0x0080);
3665 /* disable new tx descriptor format */
3666 r8168_mac_ocp_modify(tp
, 0xeb58, 0x0001, 0x0000);
3668 r8168_mac_ocp_modify(tp
, 0xe614, 0x0700, 0x0400);
3669 r8168_mac_ocp_modify(tp
, 0xe63e, 0x0c30, 0x0020);
3670 r8168_mac_ocp_modify(tp
, 0xc0b4, 0x0000, 0x000c);
3671 r8168_mac_ocp_modify(tp
, 0xeb6a, 0x00ff, 0x0033);
3672 r8168_mac_ocp_modify(tp
, 0xeb50, 0x03e0, 0x0040);
3673 r8168_mac_ocp_modify(tp
, 0xe056, 0x00f0, 0x0030);
3674 r8168_mac_ocp_modify(tp
, 0xe040, 0x1000, 0x0000);
3675 r8168_mac_ocp_modify(tp
, 0xe0c0, 0x4f0f, 0x4403);
3676 r8168_mac_ocp_modify(tp
, 0xe052, 0x0080, 0x0067);
3677 r8168_mac_ocp_modify(tp
, 0xc0ac, 0x0080, 0x1f00);
3678 r8168_mac_ocp_modify(tp
, 0xd430, 0x0fff, 0x047f);
3679 r8168_mac_ocp_modify(tp
, 0xe84c, 0x0000, 0x00c0);
3680 r8168_mac_ocp_modify(tp
, 0xea1c, 0x0004, 0x0000);
3681 r8168_mac_ocp_modify(tp
, 0xeb54, 0x0000, 0x0001);
3683 r8168_mac_ocp_modify(tp
, 0xeb54, 0x0001, 0x0000);
3684 RTL_W16(tp
, 0x1880, RTL_R16(tp
, 0x1880) & ~0x0030);
3686 r8168_mac_ocp_write(tp
, 0xe098, 0xc302);
3688 rtl_udelay_loop_wait_low(tp
, &rtl_mac_ocp_e00e_cond
, 1000, 10);
3690 rtl8125_config_eee_mac(tp
);
3692 RTL_W32(tp
, MISC
, RTL_R32(tp
, MISC
) & ~RXDV_GATED_EN
);
3696 static void rtl_hw_start_8125_1(struct rtl8169_private
*tp
)
3698 static const struct ephy_info e_info_8125_1
[] = {
3699 { 0x01, 0xffff, 0xa812 },
3700 { 0x09, 0xffff, 0x520c },
3701 { 0x04, 0xffff, 0xd000 },
3702 { 0x0d, 0xffff, 0xf702 },
3703 { 0x0a, 0xffff, 0x8653 },
3704 { 0x06, 0xffff, 0x001e },
3705 { 0x08, 0xffff, 0x3595 },
3706 { 0x20, 0xffff, 0x9455 },
3707 { 0x21, 0xffff, 0x99ff },
3708 { 0x02, 0xffff, 0x6046 },
3709 { 0x29, 0xffff, 0xfe00 },
3710 { 0x23, 0xffff, 0xab62 },
3712 { 0x41, 0xffff, 0xa80c },
3713 { 0x49, 0xffff, 0x520c },
3714 { 0x44, 0xffff, 0xd000 },
3715 { 0x4d, 0xffff, 0xf702 },
3716 { 0x4a, 0xffff, 0x8653 },
3717 { 0x46, 0xffff, 0x001e },
3718 { 0x48, 0xffff, 0x3595 },
3719 { 0x60, 0xffff, 0x9455 },
3720 { 0x61, 0xffff, 0x99ff },
3721 { 0x42, 0xffff, 0x6046 },
3722 { 0x69, 0xffff, 0xfe00 },
3723 { 0x63, 0xffff, 0xab62 },
3726 rtl_set_def_aspm_entry_latency(tp
);
3728 /* disable aspm and clock request before access ephy */
3729 rtl_hw_aspm_clkreq_enable(tp
, false);
3730 rtl_ephy_init(tp
, e_info_8125_1
);
3732 rtl_hw_start_8125_common(tp
);
3735 static void rtl_hw_start_8125_2(struct rtl8169_private
*tp
)
3737 static const struct ephy_info e_info_8125_2
[] = {
3738 { 0x04, 0xffff, 0xd000 },
3739 { 0x0a, 0xffff, 0x8653 },
3740 { 0x23, 0xffff, 0xab66 },
3741 { 0x20, 0xffff, 0x9455 },
3742 { 0x21, 0xffff, 0x99ff },
3743 { 0x29, 0xffff, 0xfe04 },
3745 { 0x44, 0xffff, 0xd000 },
3746 { 0x4a, 0xffff, 0x8653 },
3747 { 0x63, 0xffff, 0xab66 },
3748 { 0x60, 0xffff, 0x9455 },
3749 { 0x61, 0xffff, 0x99ff },
3750 { 0x69, 0xffff, 0xfe04 },
3753 rtl_set_def_aspm_entry_latency(tp
);
3755 /* disable aspm and clock request before access ephy */
3756 rtl_hw_aspm_clkreq_enable(tp
, false);
3757 rtl_ephy_init(tp
, e_info_8125_2
);
3759 rtl_hw_start_8125_common(tp
);
static void rtl_hw_config(struct rtl8169_private *tp)
{
	static const rtl_generic_fct hw_configs[] = {
		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
		[RTL_GIGA_MAC_VER_10] = NULL,
		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
		[RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
		[RTL_GIGA_MAC_VER_13] = NULL,
		[RTL_GIGA_MAC_VER_14] = NULL,
		[RTL_GIGA_MAC_VER_15] = NULL,
		[RTL_GIGA_MAC_VER_16] = NULL,
		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
		[RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
		[RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
		[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
		[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
		[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
		[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
		[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
	};

	if (hw_configs[tp->mac_version])
		hw_configs[tp->mac_version](tp);
}
static void rtl_hw_start_8125(struct rtl8169_private *tp)
{
	int i;

	/* disable interrupt coalescing */
	for (i = 0xa00; i < 0xb00; i += 4)
		RTL_W32(tp, i, 0);

	rtl_hw_config(tp);
}

static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
	if (rtl_is_8168evl_up(tp))
		RTL_W8(tp, MaxTxPacketSize, EarlySize);
	else
		RTL_W8(tp, MaxTxPacketSize, TxPacketMax);

	rtl_hw_config(tp);

	/* disable interrupt coalescing */
	RTL_W16(tp, IntrMitigate, 0x0000);
}

static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
	RTL_W8(tp, EarlyTxThres, NoEarlyTx);

	tp->cp_cmd |= PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03)
		tp->cp_cmd |= EnAnaPLL;

	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(tp, tp->mac_version);

	/* disable interrupt coalescing */
	RTL_W16(tp, IntrMitigate, 0x0000);
}

static void rtl_hw_start(struct rtl8169_private *tp)
{
	rtl_unlock_config_regs(tp);

	tp->cp_cmd &= CPCMD_MASK;
	RTL_W16(tp, CPlusCmd, tp->cp_cmd);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		rtl_hw_start_8169(tp);
	else if (rtl_is_8125(tp))
		rtl_hw_start_8125(tp);
	else
		rtl_hw_start_8168(tp);

	rtl_set_rx_max_size(tp);
	rtl_set_rx_tx_desc_registers(tp);
	rtl_lock_config_regs(tp);

	rtl_jumbo_config(tp);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R16(tp, CPlusCmd);
	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_init_rxcfg(tp);
	rtl_set_tx_config_registers(tp);
	rtl_set_rx_mode(tp->dev);
	rtl_irq_enable(tp);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	dev->mtu = new_mtu;
	netdev_update_features(dev);
	rtl_jumbo_config(tp);

	return 0;
}
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}

static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts2 = 0;
	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
3919 static struct page
*rtl8169_alloc_rx_data(struct rtl8169_private
*tp
,
3920 struct RxDesc
*desc
)
3922 struct device
*d
= tp_to_dev(tp
);
3923 int node
= dev_to_node(d
);
3927 data
= alloc_pages_node(node
, GFP_KERNEL
, get_order(R8169_RX_BUF_SIZE
));
3931 mapping
= dma_map_page(d
, data
, 0, R8169_RX_BUF_SIZE
, DMA_FROM_DEVICE
);
3932 if (unlikely(dma_mapping_error(d
, mapping
))) {
3933 if (net_ratelimit())
3934 netif_err(tp
, drv
, tp
->dev
, "Failed to map RX DMA!\n");
3935 __free_pages(data
, get_order(R8169_RX_BUF_SIZE
));
3939 desc
->addr
= cpu_to_le64(mapping
);
3940 rtl8169_mark_to_asic(desc
);
3945 static void rtl8169_rx_clear(struct rtl8169_private
*tp
)
3949 for (i
= 0; i
< NUM_RX_DESC
&& tp
->Rx_databuff
[i
]; i
++) {
3950 dma_unmap_page(tp_to_dev(tp
),
3951 le64_to_cpu(tp
->RxDescArray
[i
].addr
),
3952 R8169_RX_BUF_SIZE
, DMA_FROM_DEVICE
);
3953 __free_pages(tp
->Rx_databuff
[i
], get_order(R8169_RX_BUF_SIZE
));
3954 tp
->Rx_databuff
[i
] = NULL
;
3955 rtl8169_make_unusable_by_asic(tp
->RxDescArray
+ i
);
3959 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc
*desc
)
3961 desc
->opts1
|= cpu_to_le32(RingEnd
);
3964 static int rtl8169_rx_fill(struct rtl8169_private
*tp
)
3968 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
3971 data
= rtl8169_alloc_rx_data(tp
, tp
->RxDescArray
+ i
);
3973 rtl8169_rx_clear(tp
);
3976 tp
->Rx_databuff
[i
] = data
;
3979 rtl8169_mark_as_last_descriptor(tp
->RxDescArray
+ NUM_RX_DESC
- 1);
3984 static int rtl8169_init_ring(struct rtl8169_private
*tp
)
3986 rtl8169_init_ring_indexes(tp
);
3988 memset(tp
->tx_skb
, 0, sizeof(tp
->tx_skb
));
3989 memset(tp
->Rx_databuff
, 0, sizeof(tp
->Rx_databuff
));
3991 return rtl8169_rx_fill(tp
);
3994 static void rtl8169_unmap_tx_skb(struct rtl8169_private
*tp
, unsigned int entry
)
3996 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
3997 struct TxDesc
*desc
= tp
->TxDescArray
+ entry
;
3999 dma_unmap_single(tp_to_dev(tp
), le64_to_cpu(desc
->addr
), tx_skb
->len
,
4001 memset(desc
, 0, sizeof(*desc
));
4002 memset(tx_skb
, 0, sizeof(*tx_skb
));
4005 static void rtl8169_tx_clear_range(struct rtl8169_private
*tp
, u32 start
,
4010 for (i
= 0; i
< n
; i
++) {
4011 unsigned int entry
= (start
+ i
) % NUM_TX_DESC
;
4012 struct ring_info
*tx_skb
= tp
->tx_skb
+ entry
;
4013 unsigned int len
= tx_skb
->len
;
4016 struct sk_buff
*skb
= tx_skb
->skb
;
4018 rtl8169_unmap_tx_skb(tp
, entry
);
4020 dev_consume_skb_any(skb
);
4025 static void rtl8169_tx_clear(struct rtl8169_private
*tp
)
4027 rtl8169_tx_clear_range(tp
, tp
->dirty_tx
, NUM_TX_DESC
);
4028 tp
->cur_tx
= tp
->dirty_tx
= 0;
4029 netdev_reset_queue(tp
->dev
);
4032 static void rtl_reset_work(struct rtl8169_private
*tp
)
4034 struct net_device
*dev
= tp
->dev
;
4037 napi_disable(&tp
->napi
);
4038 netif_stop_queue(dev
);
4041 rtl8169_hw_reset(tp
);
4043 for (i
= 0; i
< NUM_RX_DESC
; i
++)
4044 rtl8169_mark_to_asic(tp
->RxDescArray
+ i
);
4046 rtl8169_tx_clear(tp
);
4047 rtl8169_init_ring_indexes(tp
);
4049 napi_enable(&tp
->napi
);
4051 netif_wake_queue(dev
);
4054 static void rtl8169_tx_timeout(struct net_device
*dev
, unsigned int txqueue
)
4056 struct rtl8169_private
*tp
= netdev_priv(dev
);
4058 rtl_schedule_task(tp
, RTL_FLAG_TASK_RESET_PENDING
);
4061 static int rtl8169_tx_map(struct rtl8169_private
*tp
, const u32
*opts
, u32 len
,
4062 void *addr
, unsigned int entry
, bool desc_own
)
4064 struct TxDesc
*txd
= tp
->TxDescArray
+ entry
;
4065 struct device
*d
= tp_to_dev(tp
);
4070 mapping
= dma_map_single(d
, addr
, len
, DMA_TO_DEVICE
);
4071 ret
= dma_mapping_error(d
, mapping
);
4072 if (unlikely(ret
)) {
4073 if (net_ratelimit())
4074 netif_err(tp
, drv
, tp
->dev
, "Failed to map TX data!\n");
4078 txd
->addr
= cpu_to_le64(mapping
);
4079 txd
->opts2
= cpu_to_le32(opts
[1]);
4081 opts1
= opts
[0] | len
;
4082 if (entry
== NUM_TX_DESC
- 1)
4086 txd
->opts1
= cpu_to_le32(opts1
);
4088 tp
->tx_skb
[entry
].len
= len
;
4093 static int rtl8169_xmit_frags(struct rtl8169_private
*tp
, struct sk_buff
*skb
,
4094 const u32
*opts
, unsigned int entry
)
4096 struct skb_shared_info
*info
= skb_shinfo(skb
);
4097 unsigned int cur_frag
;
4099 for (cur_frag
= 0; cur_frag
< info
->nr_frags
; cur_frag
++) {
4100 const skb_frag_t
*frag
= info
->frags
+ cur_frag
;
4101 void *addr
= skb_frag_address(frag
);
4102 u32 len
= skb_frag_size(frag
);
4104 entry
= (entry
+ 1) % NUM_TX_DESC
;
4106 if (unlikely(rtl8169_tx_map(tp
, opts
, len
, addr
, entry
, true)))
4113 rtl8169_tx_clear_range(tp
, tp
->cur_tx
+ 1, cur_frag
);
4117 static bool rtl_test_hw_pad_bug(struct rtl8169_private
*tp
, struct sk_buff
*skb
)
4119 return skb
->len
< ETH_ZLEN
&& tp
->mac_version
== RTL_GIGA_MAC_VER_34
;
4122 static void rtl8169_tso_csum_v1(struct sk_buff
*skb
, u32
*opts
)
4124 u32 mss
= skb_shinfo(skb
)->gso_size
;
4128 opts
[0] |= mss
<< TD0_MSS_SHIFT
;
4129 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
4130 const struct iphdr
*ip
= ip_hdr(skb
);
4132 if (ip
->protocol
== IPPROTO_TCP
)
4133 opts
[0] |= TD0_IP_CS
| TD0_TCP_CS
;
4134 else if (ip
->protocol
== IPPROTO_UDP
)
4135 opts
[0] |= TD0_IP_CS
| TD0_UDP_CS
;
4141 static bool rtl8169_tso_csum_v2(struct rtl8169_private
*tp
,
4142 struct sk_buff
*skb
, u32
*opts
)
4144 u32 transport_offset
= (u32
)skb_transport_offset(skb
);
4145 u32 mss
= skb_shinfo(skb
)->gso_size
;
4148 switch (vlan_get_protocol(skb
)) {
4149 case htons(ETH_P_IP
):
4150 opts
[0] |= TD1_GTSENV4
;
4153 case htons(ETH_P_IPV6
):
4154 if (skb_cow_head(skb
, 0))
4157 tcp_v6_gso_csum_prep(skb
);
4158 opts
[0] |= TD1_GTSENV6
;
4166 opts
[0] |= transport_offset
<< GTTCPHO_SHIFT
;
4167 opts
[1] |= mss
<< TD1_MSS_SHIFT
;
4168 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
4171 switch (vlan_get_protocol(skb
)) {
4172 case htons(ETH_P_IP
):
4173 opts
[1] |= TD1_IPv4_CS
;
4174 ip_protocol
= ip_hdr(skb
)->protocol
;
4177 case htons(ETH_P_IPV6
):
4178 opts
[1] |= TD1_IPv6_CS
;
4179 ip_protocol
= ipv6_hdr(skb
)->nexthdr
;
4183 ip_protocol
= IPPROTO_RAW
;
4187 if (ip_protocol
== IPPROTO_TCP
)
4188 opts
[1] |= TD1_TCP_CS
;
4189 else if (ip_protocol
== IPPROTO_UDP
)
4190 opts
[1] |= TD1_UDP_CS
;
4194 opts
[1] |= transport_offset
<< TCPHO_SHIFT
;
4196 if (unlikely(rtl_test_hw_pad_bug(tp
, skb
)))
4197 return !eth_skb_pad(skb
);
static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
			       unsigned int nr_frags)
{
	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;

	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
	return slots_avail > nr_frags;
}

/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
		return false;
	default:
		return true;
	}
}

static void rtl8169_doorbell(struct rtl8169_private *tp)
{
	if (rtl_is_8125(tp))
		RTL_W16(tp, TxPoll_8125, BIT(0));
	else
		RTL_W8(tp, TxPoll, NPQ);
}
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	unsigned int frags = skb_shinfo(skb)->nr_frags;
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd_first, *txd_last;
	bool stop_queue, door_bell;
	u32 opts[2];

	txd_first = tp->TxDescArray + entry;

	if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
		goto err_stop_0;

	opts[1] = rtl8169_tx_vlan_tag(skb);
	opts[0] = 0;

	if (!rtl_chip_supports_csum_v2(tp))
		rtl8169_tso_csum_v1(skb, opts);
	else if (!rtl8169_tso_csum_v2(tp, skb, opts))
		goto err_dma_0;

	if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
				    entry, false)))
		goto err_dma_0;

	if (frags) {
		if (rtl8169_xmit_frags(tp, skb, opts, entry))
			goto err_dma_1;
		entry = (entry + frags) % NUM_TX_DESC;
	}

	txd_last = tp->TxDescArray + entry;
	txd_last->opts1 |= cpu_to_le32(LastFrag);
	tp->tx_skb[entry].skb = skb;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());

	txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);

	/* Force all memory writes to complete before notifying device */
	wmb();

	tp->cur_tx += frags + 1;

	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
	if (unlikely(stop_queue)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		door_bell = true;
	}

	if (door_bell)
		rtl8169_doorbell(tp);

	if (unlikely(stop_queue)) {
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
			netif_start_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(tp, entry);
err_dma_0:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
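/* Note on ordering in rtl8169_start_xmit(): DescOwn on the first descriptor
 * is set only after dma_wmb() has made the rest of the descriptor chain
 * visible, and wmb() orders all descriptor writes before the doorbell, so
 * the chip never fetches a partially initialized chain. The smp_* barriers
 * pair with the ones in rtl_tx() to avoid missed queue wake-ups.
 */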
static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	int transport_offset = skb_transport_offset(skb);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (skb_is_gso(skb)) {
		if (transport_offset > GTTCPHO_MAX &&
		    rtl_chip_supports_csum_v2(tp))
			features &= ~NETIF_F_ALL_TSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->len < ETH_ZLEN) {
			switch (tp->mac_version) {
			case RTL_GIGA_MAC_VER_11:
			case RTL_GIGA_MAC_VER_12:
			case RTL_GIGA_MAC_VER_17:
			case RTL_GIGA_MAC_VER_34:
				features &= ~NETIF_F_CSUM_MASK;
				break;
			default:
				break;
			}
		}

		if (transport_offset > TCPHO_MAX &&
		    rtl_chip_supports_csum_v2(tp))
			features &= ~NETIF_F_CSUM_MASK;
	}

	return vlan_features_check(skb, features);
}
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int pci_status_errs;
	u16 pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);

	pci_status_errs = pci_status_get_and_clear_errors(pdev);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
		  pci_cmd, pci_status_errs);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
		   int budget)
{
	unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;

	dirty_tx = tp->dirty_tx;
	smp_rmb();

	for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct sk_buff *skb = tp->tx_skb[entry].skb;
		u32 status;

		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(tp, entry);

		if (skb) {
			pkts_compl++;
			bytes_compl += skb->len;
			napi_consume_skb(skb, budget);
		}
		dirty_tx++;
	}

	if (tp->dirty_tx != dirty_tx) {
		netdev_completed_queue(dev, pkts_compl, bytes_compl);

		u64_stats_update_begin(&tp->tx_stats.syncp);
		tp->tx_stats.packets += pkts_compl;
		tp->tx_stats.bytes += bytes_compl;
		u64_stats_update_end(&tp->tx_stats.syncp);

		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx)
			rtl8169_doorbell(tp);
	}
}
static inline int rtl8169_fragmented_frame(u32 status)
{
	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
	u32 status = opts1 & RxProtoMask;

	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		const void *rx_buf = page_address(tp->Rx_databuff[entry]);
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Rx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
			    dev->features & NETIF_F_RXALL) {
				goto process_pkt;
			}
		} else {
			unsigned int pkt_size;
			struct sk_buff *skb;

process_pkt:
			pkt_size = status & GENMASK(13, 0);
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size -= ETH_FCS_LEN;
			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = napi_alloc_skb(&tp->napi, pkt_size);
			if (unlikely(!skb)) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			dma_sync_single_for_cpu(tp_to_dev(tp),
						le64_to_cpu(desc->addr),
						pkt_size, DMA_FROM_DEVICE);
			prefetch(rx_buf);
			skb_copy_to_linear_data(skb, rx_buf, pkt_size);
			skb->tail += pkt_size;
			skb->len = pkt_size;
			dma_sync_single_for_device(tp_to_dev(tp),
						   le64_to_cpu(desc->addr),
						   pkt_size, DMA_FROM_DEVICE);

			rtl8169_rx_csum(skb, status);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			if (skb->pkt_type == PACKET_MULTICAST)
				dev->stats.multicast++;

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		rtl8169_mark_to_asic(desc);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct rtl8169_private *tp = dev_instance;
	u32 status = rtl_get_events(tp);

	if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
	    !(status & tp->irq_mask))
		return IRQ_NONE;

	if (unlikely(status & SYSErr)) {
		rtl8169_pcierr_interrupt(tp->dev);
		goto out;
	}

	if (status & LinkChg)
		phy_mac_interrupt(tp->phydev);

	if (unlikely(status & RxFIFOOver &&
	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
		netif_stop_queue(tp->dev);
		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
	}

	rtl_irq_disable(tp);
	napi_schedule_irqoff(&tp->napi);
out:
	rtl_ack_events(tp, status);

	return IRQ_HANDLED;
}
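/* Note: the hard interrupt handler only acknowledges and classifies events;
 * it disables further chip interrupts and defers all Rx/Tx work to NAPI via
 * napi_schedule_irqoff(). rtl8169_poll() re-enables interrupts once it
 * completes under budget.
 */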
static void rtl_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);

	rtl_lock_work(tp);

	if (!netif_running(tp->dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags))
		rtl_reset_work(tp);
out_unlock:
	rtl_unlock_work(tp);
}
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	int work_done;

	work_done = rtl_rx(dev, tp, (u32) budget);

	rtl_tx(dev, tp, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		rtl_irq_enable(tp);
	}

	return work_done;
}
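/* Note: Tx completion in rtl8169_poll() is not budget-limited; only the Rx
 * path counts against the NAPI budget, which is a common pattern in NAPI
 * drivers.
 */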
static void r8169_phylink_handler(struct net_device *ndev)
{
	struct rtl8169_private *tp = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		rtl_link_chg_patch(tp);
		pm_request_resume(&tp->pci_dev->dev);
	} else {
		pm_runtime_idle(&tp->pci_dev->dev);
	}

	if (net_ratelimit())
		phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
{
	struct phy_device *phydev = tp->phydev;
	phy_interface_t phy_mode;
	int ret;

	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
		   PHY_INTERFACE_MODE_MII;

	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
				 phy_mode);
	if (ret)
		return ret;

	if (!tp->supports_gmii)
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	phy_attached_info(phydev);

	return 0;
}
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	phy_stop(tp->phydev);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_rcu();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(tp);

	rtl_lock_work(tp);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	cancel_work_sync(&tp->wk.work);

	phy_disconnect(tp->phydev);

	pci_free_irq(pdev, 0, tp);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
}
#endif
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(tp);
	if (retval < 0)
		goto err_free_rx_1;

	rtl_request_firmware(tp);

	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
				 dev->name);
	if (retval < 0)
		goto err_release_fw_2;

	retval = r8169_phy_connect(tp);
	if (retval)
		goto err_free_irq;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(tp);

	rtl_pll_power_up(tp);

	rtl_hw_start(tp);

	if (!rtl8169_init_counter_offsets(tp))
		netif_warn(tp, hw, dev, "counter reset/update failed\n");

	phy_start(tp->phydev);
	netif_start_queue(dev);

	rtl_unlock_work(tp);

	pm_runtime_put_sync(&pdev->dev);
out:
	return retval;

err_free_irq:
	pci_free_irq(pdev, 0, tp);
err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
static void
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	struct rtl8169_counters *counters = tp->counters;
	unsigned int start;

	pm_runtime_get_noresume(&pdev->dev);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes = tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes = tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));

	/*
	 * Fetch additional counter values missing in stats collected by driver
	 * from tally counters.
	 */
	if (pm_runtime_active(&pdev->dev))
		rtl8169_update_counters(tp);

	/*
	 * Subtract values fetched during initalization.
	 * See rtl8169_init_counter_offsets for a description why we do that.
	 */
	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
		le64_to_cpu(tp->tc_offset.tx_errors);
	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
		le32_to_cpu(tp->tc_offset.tx_multi_collision);
	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
		le16_to_cpu(tp->tc_offset.tx_aborted);
	stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
		le16_to_cpu(tp->tc_offset.rx_missed);

	pm_runtime_put_noidle(&pdev->dev);
}
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	phy_stop(tp->phydev);
	netif_device_detach(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Clear all task flags */
	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);

	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
#ifdef CONFIG_PM

static int rtl8169_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_net_suspend(dev);
	clk_disable_unprepare(tp->clk);

	return 0;
}
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);
	rtl8169_init_phy(tp);

	phy_start(tp->phydev);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_reset_work(tp);
	rtl_unlock_work(tp);
}
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_rar_set(tp, dev->dev_addr);

	clk_prepare_enable(tp->clk);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
static int rtl8169_runtime_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	/* Update counters before going runtime suspend */
	rtl8169_update_counters(tp);

	return 0;
}
static int rtl8169_runtime_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_rar_set(tp, dev->dev_addr);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	rtl_unlock_work(tp);

	__rtl8169_resume(dev);

	return 0;
}
static int rtl8169_runtime_idle(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (!netif_running(dev) || !netif_carrier_ok(dev))
		pm_schedule_suspend(device, 10000);

	return -EBUSY;
}
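/* Note: the runtime-idle callback only schedules a delayed autosuspend
 * (10 s) when the interface is down or has no link; returning -EBUSY keeps
 * the PM core from suspending the device immediately.
 */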
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(tp, ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(tp, ChipCmd);
		break;
	default:
		break;
	}
}
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (tp->saved_wolopts) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (r8168_check_dash(tp))
		rtl8168_driver_stop(tp);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);
	mdiobus_unregister(tp->phydev->mdio.bus);

	rtl_release_firmware(tp);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);
}
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_features_check	= rtl8169_features_check,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
static void rtl_set_irq_mask(struct rtl8169_private *tp)
{
	tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
	else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
		/* special workaround needed */
		tp->irq_mask |= RxFIFOOver;
	else
		tp->irq_mask |= RxOverflow;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
{
	unsigned int flags;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
		rtl_unlock_config_regs(tp);
		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
		rtl_lock_config_regs(tp);
		/* fall through */
	case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
		flags = PCI_IRQ_LEGACY;
		break;
	default:
		flags = PCI_IRQ_ALL_TYPES;
		break;
	}

	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
}
static void rtl_read_mac_address(struct rtl8169_private *tp,
				 u8 mac_addr[ETH_ALEN])
{
	/* Get MAC address */
	if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
		u32 value = rtl_eri_read(tp, 0xe0);

		mac_addr[0] = (value >>  0) & 0xff;
		mac_addr[1] = (value >>  8) & 0xff;
		mac_addr[2] = (value >> 16) & 0xff;
		mac_addr[3] = (value >> 24) & 0xff;

		value = rtl_eri_read(tp, 0xe4);
		mac_addr[4] = (value >>  0) & 0xff;
		mac_addr[5] = (value >>  8) & 0xff;
	} else if (rtl_is_8125(tp)) {
		rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
	}
}
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}

DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
{
	struct rtl8169_private *tp = mii_bus->priv;

	if (phyaddr > 0)
		return -ENODEV;

	return rtl_readphy(tp, phyreg);
}

static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
				int phyreg, u16 val)
{
	struct rtl8169_private *tp = mii_bus->priv;

	if (phyaddr > 0)
		return -ENODEV;

	rtl_writephy(tp, phyreg, val);

	return 0;
}
static int r8169_mdio_register(struct rtl8169_private *tp)
{
	struct pci_dev *pdev = tp->pci_dev;
	struct mii_bus *new_bus;
	int ret;

	new_bus = devm_mdiobus_alloc(&pdev->dev);
	if (!new_bus)
		return -ENOMEM;

	new_bus->name = "r8169";
	new_bus->priv = tp;
	new_bus->parent = &pdev->dev;
	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));

	new_bus->read = r8169_mdio_read_reg;
	new_bus->write = r8169_mdio_write_reg;

	ret = mdiobus_register(new_bus);
	if (ret)
		return ret;

	tp->phydev = mdiobus_get_phy(new_bus, 0);
	if (!tp->phydev) {
		mdiobus_unregister(new_bus);
		return -ENODEV;
	} else if (!tp->phydev->drv) {
		/* Most chip versions fail with the genphy driver.
		 * Therefore ensure that the dedicated PHY driver is loaded.
		 */
		dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
		mdiobus_unregister(new_bus);
		return -EUNATCH;
	}

	/* PHY will be woken up in rtl_open() */
	phy_suspend(tp->phydev);

	return 0;
}
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));

	rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static void rtl_hw_init_8125(struct rtl8169_private *tp)
{
	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
	r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
	r8168_mac_ocp_write(tp, 0xc01e, 0x5555);

	rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
		rtl8168ep_stop_cmac(tp);
		/* fall through */
	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
		rtl_hw_init_8168g(tp);
		break;
	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
		rtl_hw_init_8125(tp);
		break;
	default:
		break;
	}
}
static int rtl_jumbo_max(struct rtl8169_private *tp)
{
	/* Non-GBit versions don't support jumbo frames */
	if (!tp->supports_gmii)
		return 0;

	switch (tp->mac_version) {
	/* RTL8169 */
	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
		return JUMBO_7K;
	/* RTL8168b */
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		return JUMBO_4K;
	/* RTL8168c */
	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
		return JUMBO_6K;
	default:
		return JUMBO_9K;
	}
}
static void rtl_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}
static int rtl_get_ether_clk(struct rtl8169_private *tp)
{
	struct device *d = tp_to_dev(tp);
	struct clk *clk;
	int rc;

	clk = devm_clk_get(d, "ether_clk");
	if (IS_ERR(clk)) {
		rc = PTR_ERR(clk);
		if (rc == -ENOENT)
			/* clk-core allows NULL (for suspend / resume) */
			rc = 0;
		else if (rc != -EPROBE_DEFER)
			dev_err(d, "failed to get clk: %d\n", rc);
	} else {
		tp->clk = clk;
		rc = clk_prepare_enable(clk);
		if (rc)
			dev_err(d, "failed to enable clk: %d\n", rc);
		else
			rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
	}

	return rc;
}
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u8 *mac_addr = dev->dev_addr;
	int rc;

	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
	if (!rc)
		goto done;

	rtl_read_mac_address(tp, mac_addr);
	if (is_valid_ether_addr(mac_addr))
		goto done;

	rtl_read_mac_from_reg(tp, mac_addr, MAC0);
	if (is_valid_ether_addr(mac_addr))
		goto done;

	eth_hw_addr_random(dev);
	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
	rtl_rar_set(tp, mac_addr);
}
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct rtl8169_private *tp;
	int jumbo_max, region, rc;
	enum mac_version chipset;
	struct net_device *dev;
	u16 xid;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;

	/* Get the *optional* external "ether_clk" used on some boards */
	rc = rtl_get_ether_clk(tp);
	if (rc)
		return rc;

	/* Disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users.
	 */
	rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
					  PCIE_LINK_STATE_L1);
	tp->aspm_manageable = !rc;

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pcim_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "enable failure\n");
		return rc;
	}

	if (pcim_set_mwi(pdev) < 0)
		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");

	/* use first MMIO region */
	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
	if (region < 0) {
		dev_err(&pdev->dev, "no MMIO resource found\n");
		return -ENODEV;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
		return -ENODEV;
	}

	rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		return rc;
	}

	tp->mmio_addr = pcim_iomap_table(pdev)[region];

	xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;

	/* Identify chip attached to board */
	chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
	if (chipset == RTL_GIGA_MAC_NONE) {
		dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
		return -ENODEV;
	}

	tp->mac_version = chipset;

	tp->cp_cmd = RTL_R16(tp, CPlusCmd);

	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		dev->features |= NETIF_F_HIGHDMA;

	rtl_init_rxcfg(tp);

	rtl8169_irq_mask_and_ack(tp);

	rtl_hw_initialize(tp);

	rtl_hw_reset(tp);

	pci_set_master(pdev);

	rc = rtl_alloc_irq(tp);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't allocate interrupt\n");
		return rc;
	}

	mutex_init(&tp->wk.mutex);
	INIT_WORK(&tp->wk.work, rtl_task);
	u64_stats_init(&tp->rx_stats.syncp);
	u64_stats_init(&tp->tx_stats.syncp);

	rtl_init_mac_address(tp);

	dev->ethtool_ops = &rtl8169_ethtool_ops;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	tp->cp_cmd |= RxChkSum;
	/* RTL8125 uses register RxConfig for VLAN offloading config */
	if (!rtl_is_8125(tp))
		tp->cp_cmd |= RxVlan;
	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* Disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if (rtl_chip_supports_csum_v2(tp))
		dev->hw_features |= NETIF_F_IPV6_CSUM;

	dev->features |= dev->hw_features;

	/* There has been a number of reports that using SG/TSO results in
	 * tx timeouts. However for a lot of people SG/TSO works fine.
	 * Therefore disable both features by default, but allow users to
	 * enable them. Use at own risk!
	 */
	if (rtl_chip_supports_csum_v2(tp)) {
		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
	} else {
		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
		dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
	}

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	jumbo_max = rtl_jumbo_max(tp);
	if (jumbo_max)
		dev->max_mtu = jumbo_max;

	rtl_set_irq_mask(tp);

	tp->fw_name = rtl_chip_infos[chipset].fw_name;

	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
					   &tp->counters_phys_addr,
					   GFP_KERNEL);
	if (!tp->counters)
		return -ENOMEM;

	pci_set_drvdata(pdev, dev);

	rc = r8169_mdio_register(tp);
	if (rc)
		return rc;

	/* chip gets powered up in rtl_open() */
	rtl_pll_power_down(tp);

	rc = register_netdev(dev);
	if (rc)
		goto err_mdio_unregister;

	netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
		   rtl_chip_infos[chipset].name, dev->dev_addr, xid,
		   pci_irq_vector(pdev, 0));

	if (jumbo_max)
		netif_info(tp, probe, dev,
			   "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
			   jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
			   "ok" : "ko");

	if (r8168_check_dash(tp))
		rtl8168_driver_start(tp);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_sync(&pdev->dev);

	return 0;

err_mdio_unregister:
	mdiobus_unregister(tp->phydev->mdio.bus);
	return rc;
}
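/* Note: probe relies largely on managed (devm_* / pcim_*) resources, so most
 * error paths can simply return; only a failing register_netdev() needs the
 * explicit mdiobus_unregister() unwind above.
 */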
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= rtl_remove_one,
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};

module_pci_driver(rtl8169_pci_driver);