/*
 * Provenance: drivers/net/ethernet/realtek/r8169.c from the
 * linux-3.6.7-moxart tree (MOXA ART SoC port of linux-2.6.32.60-moxart).
 * blob b47d5b35024ed4c3398f6589228b9be813248c66
 */
/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
#ifdef RTL8169_DEBUG
/* Debug build: assert() logs failures (it does not halt), dprintk()
 * emits KERN_DEBUG messages with the driver prefix.
 * NOTE: the closing brace of assert() was lost in extraction; restored.
 */
#define assert(expr) \
	if (!(expr)) {					\
		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
		#expr,__FILE__,__func__,__LINE__);		\
	}
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
/* Release build: both compile to nothing. */
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Number of free Tx descriptors between the producer and consumer index. */
#define TX_SLOTS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)

/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
#define TX_FRAGS_READY_FOR(tp,nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define MAX_READ_REQUEST_SHIFT	12
#define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
#define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
#define RX_BUF_SIZE	1536	/* Rx Buffer size */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

#define RTL_EEPROM_SIG		cpu_to_le32(0x8129)
#define RTL_EEPROM_SIG_MASK	cpu_to_le32(0xffff)
#define RTL_EEPROM_SIG_ADDR	0x0000

/* write/read MMIO register; both expect a local "ioaddr" in scope. */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
/* Internal chip revision identifiers; indexes rtl_chip_infos[] below.
 * (Closing braces of both enums were lost in extraction; restored.)
 */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0,
	RTL_GIGA_MAC_VER_02,
	RTL_GIGA_MAC_VER_03,
	RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05,
	RTL_GIGA_MAC_VER_06,
	RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08,
	RTL_GIGA_MAC_VER_09,
	RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11,
	RTL_GIGA_MAC_VER_12,
	RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14,
	RTL_GIGA_MAC_VER_15,
	RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17,
	RTL_GIGA_MAC_VER_18,
	RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20,
	RTL_GIGA_MAC_VER_21,
	RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23,
	RTL_GIGA_MAC_VER_24,
	RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26,
	RTL_GIGA_MAC_VER_27,
	RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29,
	RTL_GIGA_MAC_VER_30,
	RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32,
	RTL_GIGA_MAC_VER_33,
	RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35,
	RTL_GIGA_MAC_VER_36,
	RTL_GIGA_MAC_VER_37,
	RTL_GIGA_MAC_VER_38,
	RTL_GIGA_MAC_VER_39,
	RTL_GIGA_MAC_VER_40,
	RTL_GIGA_MAC_VER_41,
	RTL_GIGA_MAC_NONE   = 0xff,	/* sentinel: revision not identified */
};

/* Tx descriptor layout generation; selects an entry of tx_desc_info[]. */
enum rtl_tx_desc_version {
	RTL_TD_0	= 0,
	RTL_TD_1	= 1,
};
157 #define JUMBO_1K ETH_DATA_LEN
158 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
159 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
160 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
161 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
163 #define _R(NAME,TD,FW,SZ,B) { \
164 .name = NAME, \
165 .txd_version = TD, \
166 .fw_name = FW, \
167 .jumbo_max = SZ, \
168 .jumbo_tx_csum = B \
171 static const struct {
172 const char *name;
173 enum rtl_tx_desc_version txd_version;
174 const char *fw_name;
175 u16 jumbo_max;
176 bool jumbo_tx_csum;
177 } rtl_chip_infos[] = {
178 /* PCI devices. */
179 [RTL_GIGA_MAC_VER_01] =
180 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_02] =
182 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_03] =
184 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
185 [RTL_GIGA_MAC_VER_04] =
186 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
187 [RTL_GIGA_MAC_VER_05] =
188 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
189 [RTL_GIGA_MAC_VER_06] =
190 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
191 /* PCI-E devices. */
192 [RTL_GIGA_MAC_VER_07] =
193 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_08] =
195 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
196 [RTL_GIGA_MAC_VER_09] =
197 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
198 [RTL_GIGA_MAC_VER_10] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_11] =
201 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
202 [RTL_GIGA_MAC_VER_12] =
203 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
204 [RTL_GIGA_MAC_VER_13] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_14] =
207 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
208 [RTL_GIGA_MAC_VER_15] =
209 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
210 [RTL_GIGA_MAC_VER_16] =
211 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
212 [RTL_GIGA_MAC_VER_17] =
213 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
214 [RTL_GIGA_MAC_VER_18] =
215 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_19] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_20] =
219 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_21] =
221 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_22] =
223 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
224 [RTL_GIGA_MAC_VER_23] =
225 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
226 [RTL_GIGA_MAC_VER_24] =
227 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
228 [RTL_GIGA_MAC_VER_25] =
229 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
230 JUMBO_9K, false),
231 [RTL_GIGA_MAC_VER_26] =
232 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
233 JUMBO_9K, false),
234 [RTL_GIGA_MAC_VER_27] =
235 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
236 [RTL_GIGA_MAC_VER_28] =
237 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
238 [RTL_GIGA_MAC_VER_29] =
239 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
240 JUMBO_1K, true),
241 [RTL_GIGA_MAC_VER_30] =
242 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
243 JUMBO_1K, true),
244 [RTL_GIGA_MAC_VER_31] =
245 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
246 [RTL_GIGA_MAC_VER_32] =
247 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
248 JUMBO_9K, false),
249 [RTL_GIGA_MAC_VER_33] =
250 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_34] =
253 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
254 JUMBO_9K, false),
255 [RTL_GIGA_MAC_VER_35] =
256 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
257 JUMBO_9K, false),
258 [RTL_GIGA_MAC_VER_36] =
259 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
260 JUMBO_9K, false),
261 [RTL_GIGA_MAC_VER_37] =
262 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
263 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_38] =
265 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
266 JUMBO_9K, false),
267 [RTL_GIGA_MAC_VER_39] =
268 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
269 JUMBO_1K, true),
270 [RTL_GIGA_MAC_VER_40] =
271 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
272 JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_41] =
274 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
276 #undef _R
278 enum cfg_version {
279 RTL_CFG_0 = 0x00,
280 RTL_CFG_1,
281 RTL_CFG_2
284 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
285 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
286 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
287 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
289 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
290 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
291 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
292 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
293 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
294 { PCI_VENDOR_ID_LINKSYS, 0x1032,
295 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
296 { 0x0001, 0x8168,
297 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
298 {0,},
301 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
303 static int rx_buf_sz = 16383;
304 static int use_dac;
305 static struct {
306 u32 msg_enable;
307 } debug = { -1 };
/* MMIO register map common to all supported chips.
 * (Closing braces of these enums were lost in extraction; restored.)
 */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	FuncForceEvent	= 0xfc,
};

/* Registers specific to the 8110 (TBI) family. */
enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};

/* Registers shared by the 8168 and 8101 families. */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff
#define CSIAR_FUNC_CARD			0x00000000
#define CSIAR_FUNC_SDIO			0x00010000
#define CSIAR_FUNC_NIC			0x00020000
	PMCH			= 0x6f,
	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	TX_EMPTY			(1 << 5)
#define	RX_EMPTY			(1 << 4)
#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
#define	LINK_LIST_RDY			(1 << 1)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
};

/* Registers specific to the 8168 family. */
enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,
	ERIAR			= 0x74,
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	GPHY_OCP		= 0xb8,
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define RXDV_GATED_EN			(1 << 19)
#define EARLY_TALLY_EN			(1 << 16)
};
/* Bit definitions for the registers above.
 * (Closing brace of the enum was lost in extraction; restored.)
 */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxBOVF	= (1 << 24),
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* DumpCounterCommand */
	CounterDump	= 0x8,
};
586 enum rtl_desc_bit {
587 /* First doubleword. */
588 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
589 RingEnd = (1 << 30), /* End of descriptor ring */
590 FirstFrag = (1 << 29), /* First segment of a packet */
591 LastFrag = (1 << 28), /* Final segment of a packet */
594 /* Generic case. */
595 enum rtl_tx_desc_bit {
596 /* First doubleword. */
597 TD_LSO = (1 << 27), /* Large Send Offload */
598 #define TD_MSS_MAX 0x07ffu /* MSS value */
600 /* Second doubleword. */
601 TxVlanTag = (1 << 17), /* Add VLAN tag */
604 /* 8169, 8168b and 810x except 8102e. */
605 enum rtl_tx_desc_bit_0 {
606 /* First doubleword. */
607 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
608 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
609 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
610 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
613 /* 8102e, 8168c and beyond. */
614 enum rtl_tx_desc_bit_1 {
615 /* Second doubleword. */
616 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
617 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
618 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
619 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
622 static const struct rtl_tx_desc_info {
623 struct {
624 u32 udp;
625 u32 tcp;
626 } checksum;
627 u16 mss_shift;
628 u16 opts_offset;
629 } tx_desc_info [] = {
630 [RTL_TD_0] = {
631 .checksum = {
632 .udp = TD0_IP_CS | TD0_UDP_CS,
633 .tcp = TD0_IP_CS | TD0_TCP_CS
635 .mss_shift = TD0_MSS_SHIFT,
636 .opts_offset = 0
638 [RTL_TD_1] = {
639 .checksum = {
640 .udp = TD1_IP_CS | TD1_UDP_CS,
641 .tcp = TD1_IP_CS | TD1_TCP_CS
643 .mss_shift = TD1_MSS_SHIFT,
644 .opts_offset = 1
648 enum rtl_rx_desc_bit {
649 /* Rx private */
650 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
651 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
653 #define RxProtoUDP (PID1)
654 #define RxProtoTCP (PID0)
655 #define RxProtoIP (PID1 | PID0)
656 #define RxProtoMask RxProtoIP
658 IPFail = (1 << 16), /* IP checksum failed */
659 UDPFail = (1 << 15), /* UDP/IP checksum failed */
660 TCPFail = (1 << 14), /* TCP/IP checksum failed */
661 RxVlanTag = (1 << 16), /* VLAN tag available */
664 #define RsvdMask 0x3fffc000
666 struct TxDesc {
667 __le32 opts1;
668 __le32 opts2;
669 __le64 addr;
672 struct RxDesc {
673 __le32 opts1;
674 __le32 opts2;
675 __le64 addr;
678 struct ring_info {
679 struct sk_buff *skb;
680 u32 len;
681 u8 __pad[sizeof(void *) - sizeof(u32)];
684 enum features {
685 RTL_FEATURE_WOL = (1 << 0),
686 RTL_FEATURE_MSI = (1 << 1),
687 RTL_FEATURE_GMII = (1 << 2),
690 struct rtl8169_counters {
691 __le64 tx_packets;
692 __le64 rx_packets;
693 __le64 tx_errors;
694 __le32 rx_errors;
695 __le16 rx_missed;
696 __le16 align_errors;
697 __le32 tx_one_collision;
698 __le32 tx_multi_collision;
699 __le64 rx_unicast;
700 __le64 rx_broadcast;
701 __le32 rx_multicast;
702 __le16 tx_aborted;
703 __le16 tx_underun;
706 enum rtl_flag {
707 RTL_FLAG_TASK_ENABLED,
708 RTL_FLAG_TASK_SLOW_PENDING,
709 RTL_FLAG_TASK_RESET_PENDING,
710 RTL_FLAG_TASK_PHY_PENDING,
711 RTL_FLAG_MAX
714 struct rtl8169_stats {
715 u64 packets;
716 u64 bytes;
717 struct u64_stats_sync syncp;
720 struct rtl8169_private {
721 void __iomem *mmio_addr; /* memory map physical address */
722 struct pci_dev *pci_dev;
723 struct net_device *dev;
724 struct napi_struct napi;
725 u32 msg_enable;
726 u16 txd_version;
727 u16 mac_version;
728 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
729 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
730 u32 dirty_rx;
731 u32 dirty_tx;
732 struct rtl8169_stats rx_stats;
733 struct rtl8169_stats tx_stats;
734 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
735 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
736 dma_addr_t TxPhyAddr;
737 dma_addr_t RxPhyAddr;
738 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
739 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
740 struct timer_list timer;
741 u16 cp_cmd;
743 u16 event_slow;
745 struct mdio_ops {
746 void (*write)(struct rtl8169_private *, int, int);
747 int (*read)(struct rtl8169_private *, int);
748 } mdio_ops;
750 struct pll_power_ops {
751 void (*down)(struct rtl8169_private *);
752 void (*up)(struct rtl8169_private *);
753 } pll_power_ops;
755 struct jumbo_ops {
756 void (*enable)(struct rtl8169_private *);
757 void (*disable)(struct rtl8169_private *);
758 } jumbo_ops;
760 struct csi_ops {
761 void (*write)(struct rtl8169_private *, int, int);
762 u32 (*read)(struct rtl8169_private *, int);
763 } csi_ops;
765 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
766 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
767 void (*phy_reset_enable)(struct rtl8169_private *tp);
768 void (*hw_start)(struct net_device *);
769 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
770 unsigned int (*link_ok)(void __iomem *);
771 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
773 struct {
774 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
775 struct mutex mutex;
776 struct work_struct work;
777 } wk;
779 unsigned features;
781 struct mii_if_info mii;
782 struct rtl8169_counters counters;
783 u32 saved_wolopts;
784 u32 opts1_mask;
786 struct rtl_fw {
787 const struct firmware *fw;
789 #define RTL_VER_SIZE 32
791 char version[RTL_VER_SIZE];
793 struct rtl_fw_phy_action {
794 __le32 *code;
795 size_t size;
796 } phy_action;
797 } *rtl_fw;
798 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
800 u32 ocp_base;
803 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
804 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
805 module_param(use_dac, int, 0);
806 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
807 module_param_named(debug, debug.msg_enable, int, 0);
808 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
809 MODULE_LICENSE("GPL");
810 MODULE_VERSION(RTL8169_VERSION);
811 MODULE_FIRMWARE(FIRMWARE_8168D_1);
812 MODULE_FIRMWARE(FIRMWARE_8168D_2);
813 MODULE_FIRMWARE(FIRMWARE_8168E_1);
814 MODULE_FIRMWARE(FIRMWARE_8168E_2);
815 MODULE_FIRMWARE(FIRMWARE_8168E_3);
816 MODULE_FIRMWARE(FIRMWARE_8105E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168F_1);
818 MODULE_FIRMWARE(FIRMWARE_8168F_2);
819 MODULE_FIRMWARE(FIRMWARE_8402_1);
820 MODULE_FIRMWARE(FIRMWARE_8411_1);
821 MODULE_FIRMWARE(FIRMWARE_8106E_1);
822 MODULE_FIRMWARE(FIRMWARE_8168G_1);
824 static void rtl_lock_work(struct rtl8169_private *tp)
826 mutex_lock(&tp->wk.mutex);
829 static void rtl_unlock_work(struct rtl8169_private *tp)
831 mutex_unlock(&tp->wk.mutex);
834 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
836 int cap = pci_pcie_cap(pdev);
838 if (cap) {
839 u16 ctl;
841 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
842 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
843 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
847 struct rtl_cond {
848 bool (*check)(struct rtl8169_private *);
849 const char *msg;
852 static void rtl_udelay(unsigned int d)
854 udelay(d);
857 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
858 void (*delay)(unsigned int), unsigned int d, int n,
859 bool high)
861 int i;
863 for (i = 0; i < n; i++) {
864 delay(d);
865 if (c->check(tp) == high)
866 return true;
868 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
869 c->msg, !high, n, d);
870 return false;
873 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
874 const struct rtl_cond *c,
875 unsigned int d, int n)
877 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
880 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
881 const struct rtl_cond *c,
882 unsigned int d, int n)
884 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
887 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
888 const struct rtl_cond *c,
889 unsigned int d, int n)
891 return rtl_loop_wait(tp, c, msleep, d, n, true);
894 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
895 const struct rtl_cond *c,
896 unsigned int d, int n)
898 return rtl_loop_wait(tp, c, msleep, d, n, false);
/* Declare a struct rtl_cond named @name plus the header of its check
 * function; the macro invocation is followed by the function body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
911 DECLARE_RTL_COND(rtl_ocpar_cond)
913 void __iomem *ioaddr = tp->mmio_addr;
915 return RTL_R32(OCPAR) & OCPAR_FLAG;
918 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
920 void __iomem *ioaddr = tp->mmio_addr;
922 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
924 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
925 RTL_R32(OCPDR) : ~0;
928 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
930 void __iomem *ioaddr = tp->mmio_addr;
932 RTL_W32(OCPDR, data);
933 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
935 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
938 DECLARE_RTL_COND(rtl_eriar_cond)
940 void __iomem *ioaddr = tp->mmio_addr;
942 return RTL_R32(ERIAR) & ERIAR_FLAG;
945 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
947 void __iomem *ioaddr = tp->mmio_addr;
949 RTL_W8(ERIDR, cmd);
950 RTL_W32(ERIAR, 0x800010e8);
951 msleep(2);
953 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
954 return;
956 ocp_write(tp, 0x1, 0x30, 0x00000001);
959 #define OOB_CMD_RESET 0x00
960 #define OOB_CMD_DRIVER_START 0x05
961 #define OOB_CMD_DRIVER_STOP 0x06
963 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
965 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
968 DECLARE_RTL_COND(rtl_ocp_read_cond)
970 u16 reg;
972 reg = rtl8168_get_ocp_reg(tp);
974 return ocp_read(tp, 0x0f, reg) & 0x00000800;
977 static void rtl8168_driver_start(struct rtl8169_private *tp)
979 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
981 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
984 static void rtl8168_driver_stop(struct rtl8169_private *tp)
986 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
988 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
991 static int r8168dp_check_dash(struct rtl8169_private *tp)
993 u16 reg = rtl8168_get_ocp_reg(tp);
995 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
998 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
1000 if (reg & 0xffff0001) {
1001 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1002 return true;
1004 return false;
1007 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1009 void __iomem *ioaddr = tp->mmio_addr;
1011 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1014 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1016 void __iomem *ioaddr = tp->mmio_addr;
1018 if (rtl_ocp_reg_failure(tp, reg))
1019 return;
1021 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1023 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1026 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1028 void __iomem *ioaddr = tp->mmio_addr;
1030 if (rtl_ocp_reg_failure(tp, reg))
1031 return 0;
1033 RTL_W32(GPHY_OCP, reg << 15);
1035 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1036 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1039 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1041 int val;
1043 val = r8168_phy_ocp_read(tp, reg);
1044 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1047 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1049 void __iomem *ioaddr = tp->mmio_addr;
1051 if (rtl_ocp_reg_failure(tp, reg))
1052 return;
1054 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1057 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1059 void __iomem *ioaddr = tp->mmio_addr;
1061 if (rtl_ocp_reg_failure(tp, reg))
1062 return 0;
1064 RTL_W32(OCPDR, reg << 15);
1066 return RTL_R32(OCPDR);
1069 #define OCP_STD_PHY_BASE 0xa400
1071 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1073 if (reg == 0x1f) {
1074 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1075 return;
1078 if (tp->ocp_base != OCP_STD_PHY_BASE)
1079 reg -= 0x10;
1081 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1084 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1086 if (tp->ocp_base != OCP_STD_PHY_BASE)
1087 reg -= 0x10;
1089 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1092 DECLARE_RTL_COND(rtl_phyar_cond)
1094 void __iomem *ioaddr = tp->mmio_addr;
1096 return RTL_R32(PHYAR) & 0x80000000;
1099 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1101 void __iomem *ioaddr = tp->mmio_addr;
1103 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1105 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1107 * According to hardware specs a 20us delay is required after write
1108 * complete indication, but before sending next command.
1110 udelay(20);
1113 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1115 void __iomem *ioaddr = tp->mmio_addr;
1116 int value;
1118 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1120 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1121 RTL_R32(PHYAR) & 0xffff : ~0;
1124 * According to hardware specs a 20us delay is required after read
1125 * complete indication, but before sending next command.
1127 udelay(20);
1129 return value;
1132 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1134 void __iomem *ioaddr = tp->mmio_addr;
1136 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1137 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1138 RTL_W32(EPHY_RXER_NUM, 0);
1140 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1143 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1145 r8168dp_1_mdio_access(tp, reg,
1146 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1149 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1151 void __iomem *ioaddr = tp->mmio_addr;
1153 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1155 mdelay(1);
1156 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1157 RTL_W32(EPHY_RXER_NUM, 0);
1159 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1160 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1163 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1165 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1167 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1170 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1172 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1175 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1177 void __iomem *ioaddr = tp->mmio_addr;
1179 r8168dp_2_mdio_start(ioaddr);
1181 r8169_mdio_write(tp, reg, value);
1183 r8168dp_2_mdio_stop(ioaddr);
1186 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1188 void __iomem *ioaddr = tp->mmio_addr;
1189 int value;
1191 r8168dp_2_mdio_start(ioaddr);
1193 value = r8169_mdio_read(tp, reg);
1195 r8168dp_2_mdio_stop(ioaddr);
1197 return value;
1200 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1202 tp->mdio_ops.write(tp, location, val);
1205 static int rtl_readphy(struct rtl8169_private *tp, int location)
1207 return tp->mdio_ops.read(tp, location);
/* Read-modify-write: OR the given bits into a PHY register. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
/* Set bits p and clear bits m in a PHY register (set wins, then clear). */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/* mii_if_info write hook; phy_id is ignored (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
/* mii_if_info read hook; phy_id is ignored (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1238 DECLARE_RTL_COND(rtl_ephyar_cond)
1240 void __iomem *ioaddr = tp->mmio_addr;
1242 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1245 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1247 void __iomem *ioaddr = tp->mmio_addr;
1249 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1250 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1252 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1254 udelay(10);
1257 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1259 void __iomem *ioaddr = tp->mmio_addr;
1261 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1263 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1264 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1267 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1268 u32 val, int type)
1270 void __iomem *ioaddr = tp->mmio_addr;
1272 BUG_ON((addr & 3) || (mask == 0));
1273 RTL_W32(ERIDR, val);
1274 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1276 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1279 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1281 void __iomem *ioaddr = tp->mmio_addr;
1283 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1285 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1286 RTL_R32(ERIDR) : ~0;
1289 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1290 u32 m, int type)
1292 u32 val;
1294 val = rtl_eri_read(tp, addr, type);
1295 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1298 struct exgmac_reg {
1299 u16 addr;
1300 u16 mask;
1301 u32 val;
1304 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1305 const struct exgmac_reg *r, int len)
1307 while (len-- > 0) {
1308 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1309 r++;
1313 DECLARE_RTL_COND(rtl_efusear_cond)
1315 void __iomem *ioaddr = tp->mmio_addr;
1317 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1320 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1322 void __iomem *ioaddr = tp->mmio_addr;
1324 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1326 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1327 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1330 static u16 rtl_get_events(struct rtl8169_private *tp)
1332 void __iomem *ioaddr = tp->mmio_addr;
1334 return RTL_R16(IntrStatus);
1337 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1339 void __iomem *ioaddr = tp->mmio_addr;
1341 RTL_W16(IntrStatus, bits);
1342 mmiowb();
1345 static void rtl_irq_disable(struct rtl8169_private *tp)
1347 void __iomem *ioaddr = tp->mmio_addr;
1349 RTL_W16(IntrMask, 0);
1350 mmiowb();
1353 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1355 void __iomem *ioaddr = tp->mmio_addr;
1357 RTL_W16(IntrMask, bits);
1360 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1361 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1362 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1364 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1366 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1369 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1371 void __iomem *ioaddr = tp->mmio_addr;
1373 rtl_irq_disable(tp);
1374 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1375 RTL_R8(ChipCmd);
1378 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1380 void __iomem *ioaddr = tp->mmio_addr;
1382 return RTL_R32(TBICSR) & TBIReset;
1385 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1387 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1390 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1392 return RTL_R32(TBICSR) & TBILinkOk;
1395 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1397 return RTL_R8(PHYstatus) & LinkStatus;
1400 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1402 void __iomem *ioaddr = tp->mmio_addr;
1404 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1407 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1409 unsigned int val;
1411 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1412 rtl_writephy(tp, MII_BMCR, val & 0xffff);
1415 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1417 void __iomem *ioaddr = tp->mmio_addr;
1418 struct net_device *dev = tp->dev;
1420 if (!netif_running(dev))
1421 return;
1423 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1424 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1425 if (RTL_R8(PHYstatus) & _1000bpsF) {
1426 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1427 ERIAR_EXGMAC);
1428 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1429 ERIAR_EXGMAC);
1430 } else if (RTL_R8(PHYstatus) & _100bps) {
1431 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1432 ERIAR_EXGMAC);
1433 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1434 ERIAR_EXGMAC);
1435 } else {
1436 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1437 ERIAR_EXGMAC);
1438 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1439 ERIAR_EXGMAC);
1441 /* Reset packet filter */
1442 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1443 ERIAR_EXGMAC);
1444 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1445 ERIAR_EXGMAC);
1446 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1447 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1448 if (RTL_R8(PHYstatus) & _1000bpsF) {
1449 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1450 ERIAR_EXGMAC);
1451 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1452 ERIAR_EXGMAC);
1453 } else {
1454 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1455 ERIAR_EXGMAC);
1456 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1457 ERIAR_EXGMAC);
1459 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1460 if (RTL_R8(PHYstatus) & _10bps) {
1461 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1462 ERIAR_EXGMAC);
1463 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1464 ERIAR_EXGMAC);
1465 } else {
1466 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
1467 ERIAR_EXGMAC);
1472 static void __rtl8169_check_link_status(struct net_device *dev,
1473 struct rtl8169_private *tp,
1474 void __iomem *ioaddr, bool pm)
1476 if (tp->link_ok(ioaddr)) {
1477 rtl_link_chg_patch(tp);
1478 /* This is to cancel a scheduled suspend if there's one. */
1479 if (pm)
1480 pm_request_resume(&tp->pci_dev->dev);
1481 netif_carrier_on(dev);
1482 if (net_ratelimit())
1483 netif_info(tp, ifup, dev, "link up\n");
1484 } else {
1485 netif_carrier_off(dev);
1486 netif_info(tp, ifdown, dev, "link down\n");
1487 if (pm)
1488 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1492 static void rtl8169_check_link_status(struct net_device *dev,
1493 struct rtl8169_private *tp,
1494 void __iomem *ioaddr)
1496 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1499 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1501 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1503 void __iomem *ioaddr = tp->mmio_addr;
1504 u8 options;
1505 u32 wolopts = 0;
1507 options = RTL_R8(Config1);
1508 if (!(options & PMEnable))
1509 return 0;
1511 options = RTL_R8(Config3);
1512 if (options & LinkUp)
1513 wolopts |= WAKE_PHY;
1514 if (options & MagicPacket)
1515 wolopts |= WAKE_MAGIC;
1517 options = RTL_R8(Config5);
1518 if (options & UWF)
1519 wolopts |= WAKE_UCAST;
1520 if (options & BWF)
1521 wolopts |= WAKE_BCAST;
1522 if (options & MWF)
1523 wolopts |= WAKE_MCAST;
1525 return wolopts;
1528 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1530 struct rtl8169_private *tp = netdev_priv(dev);
1532 rtl_lock_work(tp);
1534 wol->supported = WAKE_ANY;
1535 wol->wolopts = __rtl8169_get_wol(tp);
1537 rtl_unlock_work(tp);
1540 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1542 void __iomem *ioaddr = tp->mmio_addr;
1543 unsigned int i;
1544 static const struct {
1545 u32 opt;
1546 u16 reg;
1547 u8 mask;
1548 } cfg[] = {
1549 { WAKE_PHY, Config3, LinkUp },
1550 { WAKE_MAGIC, Config3, MagicPacket },
1551 { WAKE_UCAST, Config5, UWF },
1552 { WAKE_BCAST, Config5, BWF },
1553 { WAKE_MCAST, Config5, MWF },
1554 { WAKE_ANY, Config5, LanWake }
1556 u8 options;
1558 RTL_W8(Cfg9346, Cfg9346_Unlock);
1560 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1561 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1562 if (wolopts & cfg[i].opt)
1563 options |= cfg[i].mask;
1564 RTL_W8(cfg[i].reg, options);
1567 switch (tp->mac_version) {
1568 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1569 options = RTL_R8(Config1) & ~PMEnable;
1570 if (wolopts)
1571 options |= PMEnable;
1572 RTL_W8(Config1, options);
1573 break;
1574 default:
1575 options = RTL_R8(Config2) & ~PME_SIGNAL;
1576 if (wolopts)
1577 options |= PME_SIGNAL;
1578 RTL_W8(Config2, options);
1579 break;
1582 RTL_W8(Cfg9346, Cfg9346_Lock);
1585 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1587 struct rtl8169_private *tp = netdev_priv(dev);
1589 rtl_lock_work(tp);
1591 if (wol->wolopts)
1592 tp->features |= RTL_FEATURE_WOL;
1593 else
1594 tp->features &= ~RTL_FEATURE_WOL;
1595 __rtl8169_set_wol(tp, wol->wolopts);
1597 rtl_unlock_work(tp);
1599 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1601 return 0;
1604 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1606 return rtl_chip_infos[tp->mac_version].fw_name;
1609 static void rtl8169_get_drvinfo(struct net_device *dev,
1610 struct ethtool_drvinfo *info)
1612 struct rtl8169_private *tp = netdev_priv(dev);
1613 struct rtl_fw *rtl_fw = tp->rtl_fw;
1615 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1616 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1617 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1618 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1619 if (!IS_ERR_OR_NULL(rtl_fw))
1620 strlcpy(info->fw_version, rtl_fw->version,
1621 sizeof(info->fw_version));
1624 static int rtl8169_get_regs_len(struct net_device *dev)
1626 return R8169_REGS_SIZE;
1629 static int rtl8169_set_speed_tbi(struct net_device *dev,
1630 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1632 struct rtl8169_private *tp = netdev_priv(dev);
1633 void __iomem *ioaddr = tp->mmio_addr;
1634 int ret = 0;
1635 u32 reg;
1637 reg = RTL_R32(TBICSR);
1638 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1639 (duplex == DUPLEX_FULL)) {
1640 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1641 } else if (autoneg == AUTONEG_ENABLE)
1642 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1643 else {
1644 netif_warn(tp, link, dev,
1645 "incorrect speed setting refused in TBI mode\n");
1646 ret = -EOPNOTSUPP;
1649 return ret;
1652 static int rtl8169_set_speed_xmii(struct net_device *dev,
1653 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1655 struct rtl8169_private *tp = netdev_priv(dev);
1656 int giga_ctrl, bmcr;
1657 int rc = -EINVAL;
1659 rtl_writephy(tp, 0x1f, 0x0000);
1661 if (autoneg == AUTONEG_ENABLE) {
1662 int auto_nego;
1664 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1665 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1666 ADVERTISE_100HALF | ADVERTISE_100FULL);
1668 if (adv & ADVERTISED_10baseT_Half)
1669 auto_nego |= ADVERTISE_10HALF;
1670 if (adv & ADVERTISED_10baseT_Full)
1671 auto_nego |= ADVERTISE_10FULL;
1672 if (adv & ADVERTISED_100baseT_Half)
1673 auto_nego |= ADVERTISE_100HALF;
1674 if (adv & ADVERTISED_100baseT_Full)
1675 auto_nego |= ADVERTISE_100FULL;
1677 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1679 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1680 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1682 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1683 if (tp->mii.supports_gmii) {
1684 if (adv & ADVERTISED_1000baseT_Half)
1685 giga_ctrl |= ADVERTISE_1000HALF;
1686 if (adv & ADVERTISED_1000baseT_Full)
1687 giga_ctrl |= ADVERTISE_1000FULL;
1688 } else if (adv & (ADVERTISED_1000baseT_Half |
1689 ADVERTISED_1000baseT_Full)) {
1690 netif_info(tp, link, dev,
1691 "PHY does not support 1000Mbps\n");
1692 goto out;
1695 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1697 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1698 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1699 } else {
1700 giga_ctrl = 0;
1702 if (speed == SPEED_10)
1703 bmcr = 0;
1704 else if (speed == SPEED_100)
1705 bmcr = BMCR_SPEED100;
1706 else
1707 goto out;
1709 if (duplex == DUPLEX_FULL)
1710 bmcr |= BMCR_FULLDPLX;
1713 rtl_writephy(tp, MII_BMCR, bmcr);
1715 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1716 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1717 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1718 rtl_writephy(tp, 0x17, 0x2138);
1719 rtl_writephy(tp, 0x0e, 0x0260);
1720 } else {
1721 rtl_writephy(tp, 0x17, 0x2108);
1722 rtl_writephy(tp, 0x0e, 0x0000);
1726 rc = 0;
1727 out:
1728 return rc;
1731 static int rtl8169_set_speed(struct net_device *dev,
1732 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1734 struct rtl8169_private *tp = netdev_priv(dev);
1735 int ret;
1737 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1738 if (ret < 0)
1739 goto out;
1741 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1742 (advertising & ADVERTISED_1000baseT_Full)) {
1743 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1745 out:
1746 return ret;
1749 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1751 struct rtl8169_private *tp = netdev_priv(dev);
1752 int ret;
1754 del_timer_sync(&tp->timer);
1756 rtl_lock_work(tp);
1757 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1758 cmd->duplex, cmd->advertising);
1759 rtl_unlock_work(tp);
1761 return ret;
1764 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1765 netdev_features_t features)
1767 struct rtl8169_private *tp = netdev_priv(dev);
1769 if (dev->mtu > TD_MSS_MAX)
1770 features &= ~NETIF_F_ALL_TSO;
1772 if (dev->mtu > JUMBO_1K &&
1773 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1774 features &= ~NETIF_F_IP_CSUM;
1776 return features;
1779 static void __rtl8169_set_features(struct net_device *dev,
1780 netdev_features_t features)
1782 struct rtl8169_private *tp = netdev_priv(dev);
1783 netdev_features_t changed = features ^ dev->features;
1784 void __iomem *ioaddr = tp->mmio_addr;
1786 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1787 return;
1789 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1790 if (features & NETIF_F_RXCSUM)
1791 tp->cp_cmd |= RxChkSum;
1792 else
1793 tp->cp_cmd &= ~RxChkSum;
1795 if (dev->features & NETIF_F_HW_VLAN_RX)
1796 tp->cp_cmd |= RxVlan;
1797 else
1798 tp->cp_cmd &= ~RxVlan;
1800 RTL_W16(CPlusCmd, tp->cp_cmd);
1801 RTL_R16(CPlusCmd);
1803 if (changed & NETIF_F_RXALL) {
1804 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1805 if (features & NETIF_F_RXALL)
1806 tmp |= (AcceptErr | AcceptRunt);
1807 RTL_W32(RxConfig, tmp);
1811 static int rtl8169_set_features(struct net_device *dev,
1812 netdev_features_t features)
1814 struct rtl8169_private *tp = netdev_priv(dev);
1816 rtl_lock_work(tp);
1817 __rtl8169_set_features(dev, features);
1818 rtl_unlock_work(tp);
1820 return 0;
1824 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1825 struct sk_buff *skb)
1827 return (vlan_tx_tag_present(skb)) ?
1828 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1831 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1833 u32 opts2 = le32_to_cpu(desc->opts2);
1835 if (opts2 & RxVlanTag)
1836 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
1838 desc->opts2 = 0;
1841 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1843 struct rtl8169_private *tp = netdev_priv(dev);
1844 void __iomem *ioaddr = tp->mmio_addr;
1845 u32 status;
1847 cmd->supported =
1848 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1849 cmd->port = PORT_FIBRE;
1850 cmd->transceiver = XCVR_INTERNAL;
1852 status = RTL_R32(TBICSR);
1853 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1854 cmd->autoneg = !!(status & TBINwEnable);
1856 ethtool_cmd_speed_set(cmd, SPEED_1000);
1857 cmd->duplex = DUPLEX_FULL; /* Always set */
1859 return 0;
1862 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1864 struct rtl8169_private *tp = netdev_priv(dev);
1866 return mii_ethtool_gset(&tp->mii, cmd);
1869 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1871 struct rtl8169_private *tp = netdev_priv(dev);
1872 int rc;
1874 rtl_lock_work(tp);
1875 rc = tp->get_settings(dev, cmd);
1876 rtl_unlock_work(tp);
1878 return rc;
1881 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1882 void *p)
1884 struct rtl8169_private *tp = netdev_priv(dev);
1886 if (regs->len > R8169_REGS_SIZE)
1887 regs->len = R8169_REGS_SIZE;
1889 rtl_lock_work(tp);
1890 memcpy_fromio(p, tp->mmio_addr, regs->len);
1891 rtl_unlock_work(tp);
1894 static u32 rtl8169_get_msglevel(struct net_device *dev)
1896 struct rtl8169_private *tp = netdev_priv(dev);
1898 return tp->msg_enable;
1901 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1903 struct rtl8169_private *tp = netdev_priv(dev);
1905 tp->msg_enable = value;
1908 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1909 "tx_packets",
1910 "rx_packets",
1911 "tx_errors",
1912 "rx_errors",
1913 "rx_missed",
1914 "align_errors",
1915 "tx_single_collisions",
1916 "tx_multi_collisions",
1917 "unicast",
1918 "broadcast",
1919 "multicast",
1920 "tx_aborted",
1921 "tx_underrun",
1924 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1926 switch (sset) {
1927 case ETH_SS_STATS:
1928 return ARRAY_SIZE(rtl8169_gstrings);
1929 default:
1930 return -EOPNOTSUPP;
1934 DECLARE_RTL_COND(rtl_counters_cond)
1936 void __iomem *ioaddr = tp->mmio_addr;
1938 return RTL_R32(CounterAddrLow) & CounterDump;
1941 static void rtl8169_update_counters(struct net_device *dev)
1943 struct rtl8169_private *tp = netdev_priv(dev);
1944 void __iomem *ioaddr = tp->mmio_addr;
1945 struct device *d = &tp->pci_dev->dev;
1946 struct rtl8169_counters *counters;
1947 dma_addr_t paddr;
1948 u32 cmd;
1951 * Some chips are unable to dump tally counters when the receiver
1952 * is disabled.
1954 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1955 return;
1957 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1958 if (!counters)
1959 return;
1961 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1962 cmd = (u64)paddr & DMA_BIT_MASK(32);
1963 RTL_W32(CounterAddrLow, cmd);
1964 RTL_W32(CounterAddrLow, cmd | CounterDump);
1966 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1967 memcpy(&tp->counters, counters, sizeof(*counters));
1969 RTL_W32(CounterAddrLow, 0);
1970 RTL_W32(CounterAddrHigh, 0);
1972 dma_free_coherent(d, sizeof(*counters), counters, paddr);
1975 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1976 struct ethtool_stats *stats, u64 *data)
1978 struct rtl8169_private *tp = netdev_priv(dev);
1980 ASSERT_RTNL();
1982 rtl8169_update_counters(dev);
1984 data[0] = le64_to_cpu(tp->counters.tx_packets);
1985 data[1] = le64_to_cpu(tp->counters.rx_packets);
1986 data[2] = le64_to_cpu(tp->counters.tx_errors);
1987 data[3] = le32_to_cpu(tp->counters.rx_errors);
1988 data[4] = le16_to_cpu(tp->counters.rx_missed);
1989 data[5] = le16_to_cpu(tp->counters.align_errors);
1990 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1991 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1992 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1993 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1994 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1995 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1996 data[12] = le16_to_cpu(tp->counters.tx_underun);
1999 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2001 switch(stringset) {
2002 case ETH_SS_STATS:
2003 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
2004 break;
2008 static const struct ethtool_ops rtl8169_ethtool_ops = {
2009 .get_drvinfo = rtl8169_get_drvinfo,
2010 .get_regs_len = rtl8169_get_regs_len,
2011 .get_link = ethtool_op_get_link,
2012 .get_settings = rtl8169_get_settings,
2013 .set_settings = rtl8169_set_settings,
2014 .get_msglevel = rtl8169_get_msglevel,
2015 .set_msglevel = rtl8169_set_msglevel,
2016 .get_regs = rtl8169_get_regs,
2017 .get_wol = rtl8169_get_wol,
2018 .set_wol = rtl8169_set_wol,
2019 .get_strings = rtl8169_get_strings,
2020 .get_sset_count = rtl8169_get_sset_count,
2021 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2022 .get_ts_info = ethtool_op_get_ts_info,
2025 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2026 struct net_device *dev, u8 default_version)
2028 void __iomem *ioaddr = tp->mmio_addr;
2030 * The driver currently handles the 8168Bf and the 8168Be identically
2031 * but they can be identified more specifically through the test below
2032 * if needed:
2034 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2036 * Same thing for the 8101Eb and the 8101Ec:
2038 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2040 static const struct rtl_mac_info {
2041 u32 mask;
2042 u32 val;
2043 int mac_version;
2044 } mac_info[] = {
2045 /* 8168G family. */
2046 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2047 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2049 /* 8168F family. */
2050 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2051 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2052 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2054 /* 8168E family. */
2055 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2056 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2057 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2058 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2060 /* 8168D family. */
2061 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2062 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2063 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2065 /* 8168DP family. */
2066 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2067 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2068 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2070 /* 8168C family. */
2071 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2072 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2073 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2074 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2075 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2076 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2077 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2078 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2079 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2081 /* 8168B family. */
2082 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2083 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2084 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2085 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2087 /* 8101 family. */
2088 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2089 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2090 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2091 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2092 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2093 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2094 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2095 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2096 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2097 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2098 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2099 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2100 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2101 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2102 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2103 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2104 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2105 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2106 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2107 /* FIXME: where did these entries come from ? -- FR */
2108 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2109 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2111 /* 8110 family. */
2112 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2113 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2114 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2115 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2116 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2117 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
2119 /* Catch-all */
2120 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2122 const struct rtl_mac_info *p = mac_info;
2123 u32 reg;
2125 reg = RTL_R32(TxConfig);
2126 while ((reg & p->mask) != p->val)
2127 p++;
2128 tp->mac_version = p->mac_version;
2130 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2131 netif_notice(tp, probe, dev,
2132 "unknown MAC, using family default\n");
2133 tp->mac_version = default_version;
2137 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2139 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2142 struct phy_reg {
2143 u16 reg;
2144 u16 val;
2147 static void rtl_writephy_batch(struct rtl8169_private *tp,
2148 const struct phy_reg *regs, int len)
2150 while (len-- > 0) {
2151 rtl_writephy(tp, regs->reg, regs->val);
2152 regs++;
2156 #define PHY_READ 0x00000000
2157 #define PHY_DATA_OR 0x10000000
2158 #define PHY_DATA_AND 0x20000000
2159 #define PHY_BJMPN 0x30000000
2160 #define PHY_READ_EFUSE 0x40000000
2161 #define PHY_READ_MAC_BYTE 0x50000000
2162 #define PHY_WRITE_MAC_BYTE 0x60000000
2163 #define PHY_CLEAR_READCOUNT 0x70000000
2164 #define PHY_WRITE 0x80000000
2165 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2166 #define PHY_COMP_EQ_SKIPN 0xa0000000
2167 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2168 #define PHY_WRITE_PREVIOUS 0xc0000000
2169 #define PHY_SKIPN 0xd0000000
2170 #define PHY_DELAY_MS 0xe0000000
2171 #define PHY_WRITE_ERI_WORD 0xf0000000
2173 struct fw_info {
2174 u32 magic;
2175 char version[RTL_VER_SIZE];
2176 __le32 fw_start;
2177 __le32 fw_len;
2178 u8 chksum;
2179 } __packed;
2181 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2183 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2185 const struct firmware *fw = rtl_fw->fw;
2186 struct fw_info *fw_info = (struct fw_info *)fw->data;
2187 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2188 char *version = rtl_fw->version;
2189 bool rc = false;
2191 if (fw->size < FW_OPCODE_SIZE)
2192 goto out;
2194 if (!fw_info->magic) {
2195 size_t i, size, start;
2196 u8 checksum = 0;
2198 if (fw->size < sizeof(*fw_info))
2199 goto out;
2201 for (i = 0; i < fw->size; i++)
2202 checksum += fw->data[i];
2203 if (checksum != 0)
2204 goto out;
2206 start = le32_to_cpu(fw_info->fw_start);
2207 if (start > fw->size)
2208 goto out;
2210 size = le32_to_cpu(fw_info->fw_len);
2211 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2212 goto out;
2214 memcpy(version, fw_info->version, RTL_VER_SIZE);
2216 pa->code = (__le32 *)(fw->data + start);
2217 pa->size = size;
2218 } else {
2219 if (fw->size % FW_OPCODE_SIZE)
2220 goto out;
2222 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2224 pa->code = (__le32 *)fw->data;
2225 pa->size = fw->size / FW_OPCODE_SIZE;
2227 version[RTL_VER_SIZE - 1] = 0;
2229 rc = true;
2230 out:
2231 return rc;
2234 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2235 struct rtl_fw_phy_action *pa)
2237 bool rc = false;
2238 size_t index;
2240 for (index = 0; index < pa->size; index++) {
2241 u32 action = le32_to_cpu(pa->code[index]);
2242 u32 regno = (action & 0x0fff0000) >> 16;
2244 switch(action & 0xf0000000) {
2245 case PHY_READ:
2246 case PHY_DATA_OR:
2247 case PHY_DATA_AND:
2248 case PHY_READ_EFUSE:
2249 case PHY_CLEAR_READCOUNT:
2250 case PHY_WRITE:
2251 case PHY_WRITE_PREVIOUS:
2252 case PHY_DELAY_MS:
2253 break;
2255 case PHY_BJMPN:
2256 if (regno > index) {
2257 netif_err(tp, ifup, tp->dev,
2258 "Out of range of firmware\n");
2259 goto out;
2261 break;
2262 case PHY_READCOUNT_EQ_SKIP:
2263 if (index + 2 >= pa->size) {
2264 netif_err(tp, ifup, tp->dev,
2265 "Out of range of firmware\n");
2266 goto out;
2268 break;
2269 case PHY_COMP_EQ_SKIPN:
2270 case PHY_COMP_NEQ_SKIPN:
2271 case PHY_SKIPN:
2272 if (index + 1 + regno >= pa->size) {
2273 netif_err(tp, ifup, tp->dev,
2274 "Out of range of firmware\n");
2275 goto out;
2277 break;
2279 case PHY_READ_MAC_BYTE:
2280 case PHY_WRITE_MAC_BYTE:
2281 case PHY_WRITE_ERI_WORD:
2282 default:
2283 netif_err(tp, ifup, tp->dev,
2284 "Invalid action 0x%08x\n", action);
2285 goto out;
2288 rc = true;
2289 out:
2290 return rc;
2293 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2295 struct net_device *dev = tp->dev;
2296 int rc = -EINVAL;
2298 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2299 netif_err(tp, ifup, dev, "invalid firwmare\n");
2300 goto out;
2303 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2304 rc = 0;
2305 out:
2306 return rc;
/*
 * Execute the firmware's PHY "bytecode" program (already validated by
 * rtl_fw_data_ok).  Each 32-bit word encodes: opcode in bits 31-28,
 * register number in bits 27-16, immediate data in bits 15-0.
 *
 * Interpreter state:
 *   predata - last value read from the PHY (PHY_READ/PHY_READ_EFUSE),
 *             possibly modified by PHY_DATA_OR/PHY_DATA_AND.
 *   count   - number of PHY_READs since the last PHY_CLEAR_READCOUNT,
 *             tested by PHY_READCOUNT_EQ_SKIP.
 *   index   - program counter; each case advances it explicitly.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	u32 predata, count;
	size_t index;

	predata = count = 0;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		/* An all-zero word terminates the program early. */
		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by regno words (no index++ here). */
			index -= regno;
			break;
		case PHY_READ_EFUSE:
			predata = rtl8168d_efuse_read(tp, regno);
			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next word when the read counter matches. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			/* Skip regno words if the last read value equals data. */
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			/* Write back the (possibly masked) last read value. */
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		case PHY_READ_MAC_BYTE:
		case PHY_WRITE_MAC_BYTE:
		case PHY_WRITE_ERI_WORD:
		default:
			/* Rejected by rtl_fw_data_ok - cannot happen here. */
			BUG();
		}
	}
}
2388 static void rtl_release_firmware(struct rtl8169_private *tp)
2390 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2391 release_firmware(tp->rtl_fw->fw);
2392 kfree(tp->rtl_fw);
2394 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2397 static void rtl_apply_firmware(struct rtl8169_private *tp)
2399 struct rtl_fw *rtl_fw = tp->rtl_fw;
2401 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2402 if (!IS_ERR_OR_NULL(rtl_fw))
2403 rtl_phy_write_fw(tp, rtl_fw);
2406 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2408 if (rtl_readphy(tp, reg) != val)
2409 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2410 else
2411 rtl_apply_firmware(tp);
/* Vendor-supplied PHY init sequence for RTL8169S (mac ver 02/03). */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* Vendor-supplied PHY tweak for RTL8169SB (mac ver 04). */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x01, 0x90d0);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2492 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2494 struct pci_dev *pdev = tp->pci_dev;
2496 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2497 (pdev->subsystem_device != 0xe000))
2498 return;
2500 rtl_writephy(tp, 0x1f, 0x0001);
2501 rtl_writephy(tp, 0x10, 0xf01b);
2502 rtl_writephy(tp, 0x1f, 0x0000);
/* Vendor-supplied PHY init sequence for RTL8169SCd (mac ver 05). */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Board-specific extra tweak (Gigabyte 0xe000 only). */
	rtl8169scd_hw_phy_config_quirk(tp);
}
/* Vendor-supplied PHY init sequence for RTL8169SCe (mac ver 06). */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* Vendor-supplied PHY tweak for RTL8168Bb (mac ver 11). */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_writephy(tp, 0x10, 0xf41b);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY tweak for RTL8168Bef (mac ver 12/17). */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_writephy(tp, 0x10, 0xf41b);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY tweak for RTL8168CP, first flavor (mac ver 18). */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x1d, 0x0f00);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x0c, 0x1ec8);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY tweak for RTL8168CP, second flavor (mac ver 23/24). */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_writephy(tp, 0x1d, 0x3d98);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168C rev 1 (mac ver 19). */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Set single bits in regs 0x14/0x0d (read-modify-write). */
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168C rev 2 (mac ver 20). */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Set single bits in regs 0x16/0x14/0x0d (read-modify-write). */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168C rev 3 (mac ver 21). */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Set single bits in regs 0x16/0x14/0x0d (read-modify-write). */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* RTL8168C rev 4 (mac ver 22) uses the same PHY setup as rev 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/* Vendor-supplied PHY init sequence for RTL8168D rev 1 (mac ver 25). */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	/* Efuse byte 0x01 == 0xb1 identifies a chip revision that needs
	 * the 0x669a tuning plus a possible 0x0d register ramp. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		if ((val & 0x00ff) != 0x006c) {
			/* Walk reg 0x0d's low byte up to 0x6c step by step. */
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Only load firmware when the PHY reports the expected state. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168D rev 2 (mac ver 26). */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/* Efuse byte 0x01 == 0xb1 identifies a chip revision that needs
	 * the 0x669a tuning plus a possible 0x0d register ramp. */
	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		if ((val & 0x00ff) != 0x006c) {
			/* Walk reg 0x0d's low byte up to 0x6c step by step. */
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Only load firmware when the PHY reports the expected state. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168D rev 3 (mac ver 27). */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* Vendor-supplied PHY init sequence for RTL8168D rev 4 (mac ver 28). */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_writephy(tp, 0x17, 0x0cc0);

	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_writephy(tp, 0x18, 0x0040);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_patchphy(tp, 0x0d, 1 << 5);
}
/* Vendor-supplied PHY init sequence for RTL8168E rev 1 (mac ver 32/33). */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	/* Firmware must run before the static register writes. */
	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	/* MMD access sequence via regs 0x0d/0x0e. */
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168E rev 2 (mac ver 34). */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	/* Firmware must run before the static register writes. */
	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	/* MMD access sequence via regs 0x0d/0x0e. */
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* PHY tweaks shared by all RTL8168F flavors (called from the per-rev
 * rtl8168f_*_hw_phy_config helpers and rtl8411_hw_phy_config). */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168F rev 1 (mac ver 35). */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	/* Firmware must run before the static register writes. */
	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Tweaks common to all 8168f flavors. */
	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* RTL8168F rev 2 (mac ver 36): firmware load plus common 8168f tweaks. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
/* Vendor-supplied PHY init sequence for RTL8411 (mac ver 38). */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	/* Firmware must run before any static register writes. */
	rtl_apply_firmware(tp);

	/* Tweaks common to the 8168f family. */
	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	/* MMD access sequence via regs 0x0d/0x0e. */
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8168G rev 1 (mac ver 40). */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* MAC OCP patch program, written verbatim to OCP RAM at 0xf800. */
	static const u16 mac_ocp_patch[] = {
		0xe008, 0xe01b, 0xe01d, 0xe01f,
		0xe021, 0xe023, 0xe025, 0xe027,
		0x49d2, 0xf10d, 0x766c, 0x49e2,
		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,

		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
		0xc707, 0x8ee1, 0x9d6c, 0xc603,
		0xbe00, 0xb416, 0x0076, 0xe86c,
		0xc602, 0xbe00, 0x0000, 0xc602,

		0xbe00, 0x0000, 0xc602, 0xbe00,
		0x0000, 0xc602, 0xbe00, 0x0000,
		0xc602, 0xbe00, 0x0000, 0xc602,
		0xbe00, 0x0000, 0xc602, 0xbe00,

		0x0000, 0x0000, 0x0000, 0x0000
	};
	u32 i;

	/* Patch code for GPHY reset */
	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
	/* Arm the patch: enable flag and entry point. */
	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);

	rtl_apply_firmware(tp);

	/* Mirror bit 8 of 0xa460 into bit 15 of 0xbcc4. */
	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);

	/* NOTE(review): the branches touch different registers (0xc41a set
	 * vs 0xbcc4 clear) - looks intentional per vendor code, but worth
	 * confirming against Realtek's reference driver. */
	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
	else
		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);

	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);

	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);

	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
}
/* Vendor-supplied PHY init sequence for RTL8102E (mac ver 07/08/09). */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_writephy(tp, 0x08, 0x441d);
	rtl_writephy(tp, 0x01, 0x9100);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8105E (mac ver 29/30). */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
/* Vendor-supplied PHY init sequence for RTL8402 (mac ver 37). */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
/* Vendor-supplied PHY init sequence for RTL8106E (mac ver 39). */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	/* (reg, value) pairs; 0x1f selects the PHY register page. */
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
/* Dispatch to the per-chip PHY setup routine based on the detected
 * mac_version.  Chips with no quirks (VER_01, VER_31, VER_41) fall
 * through without touching the PHY. */
static void rtl_hw_phy_config(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_print_mac_version(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
		break;
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
		rtl8169s_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_04:
		rtl8169sb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_05:
		rtl8169scd_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_06:
		rtl8169sce_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
		rtl8102e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_11:
		rtl8168bb_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_12:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_17:
		rtl8168bef_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_18:
		rtl8168cp_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_19:
		rtl8168c_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_20:
		rtl8168c_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_21:
		rtl8168c_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_22:
		rtl8168c_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
		rtl8168cp_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_25:
		rtl8168d_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_26:
		rtl8168d_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_27:
		rtl8168d_3_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_28:
		rtl8168d_4_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
		rtl8105e_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_31:
		/* None. */
		break;
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl8168e_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl8168e_2_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_35:
		rtl8168f_1_hw_phy_config(tp);
		break;
	case RTL_GIGA_MAC_VER_36:
		rtl8168f_2_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl8402_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl8411_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl8106e_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
		rtl8168g_1_hw_phy_config(tp);
		break;

	case RTL_GIGA_MAC_VER_41:
	default:
		break;
	}
}
/*
 * Periodic PHY watchdog (runs from the work item scheduled by
 * rtl8169_phy_timer).  While the link is down, keep resetting the PHY
 * and re-arm the timer; once the link is up, stop re-arming.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	/* Link is up: nothing to do, let the timer die. */
	if (tp->link_ok(ioaddr))
		return;

	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3633 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3635 if (!test_and_set_bit(flag, tp->wk.flags))
3636 schedule_work(&tp->wk.work);
3639 static void rtl8169_phy_timer(unsigned long __opaque)
3641 struct net_device *dev = (struct net_device *)__opaque;
3642 struct rtl8169_private *tp = netdev_priv(dev);
3644 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/*
 * Undo the probe-time board setup, in reverse order of acquisition:
 * unmap MMIO, release PCI regions, clear MWI, disable the device and
 * finally free the net_device itself.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
                                  void __iomem *ioaddr)
{
        iounmap(ioaddr);
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
        free_netdev(dev);
}
/* Poll condition: true while a PHY reset is still in flight. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
        return tp->phy_reset_pending(tp);
}
/*
 * Trigger a PHY reset and wait (1 ms steps, up to 100 tries) for it
 * to complete.
 */
static void rtl8169_phy_reset(struct net_device *dev,
                              struct rtl8169_private *tp)
{
        tp->phy_reset_enable(tp);
        rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
/*
 * TBI (ten-bit interface) is only a possibility on the original 8169
 * (VER_01); for it, the PHYstatus register reports whether TBI is active.
 */
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
            (RTL_R8(PHYstatus) & TBI_Enable);
}
/*
 * Bring the PHY to a known-good state: apply the per-chip PHY fixups,
 * poke a couple of chip-specific undocumented registers, reset the PHY
 * and start autonegotiation advertising everything the PHY supports.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        rtl_hw_phy_config(dev);

        if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
                dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
                RTL_W8(0x82, 0x01);
        }

        pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

        if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
                pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

        if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
                dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
                RTL_W8(0x82, 0x01);
                dprintk("Set PHY Reg 0x0bh = 0x00h\n");
                rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
        }

        rtl8169_phy_reset(dev, tp);

        /* Advertise 10/100 always; 1000 only on GMII-capable parts. */
        rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
                          ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                          (tp->mii.supports_gmii ?
                           ADVERTISED_1000baseT_Half |
                           ADVERTISED_1000baseT_Full : 0));

        if (rtl_tbi_enabled(tp))
                netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
/*
 * Program the unicast (station) MAC address into MAC0/MAC4. The
 * registers are EEPROM-protected, so unlock around the writes; each
 * write is followed by a read-back to post it. The 8168E-VL (VER_34)
 * additionally mirrors the address into extended GMAC registers.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
        void __iomem *ioaddr = tp->mmio_addr;
        u32 high;
        u32 low;

        low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
        high = addr[4] | (addr[5] << 8);

        rtl_lock_work(tp);

        RTL_W8(Cfg9346, Cfg9346_Unlock);

        RTL_W32(MAC4, high);
        RTL_R32(MAC4);

        RTL_W32(MAC0, low);
        RTL_R32(MAC0);

        if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
                const struct exgmac_reg e[] = {
                        { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
                        { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
                        { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
                        { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
                                                                low >> 16 },
                };

                rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
        }

        RTL_W8(Cfg9346, Cfg9346_Lock);

        rtl_unlock_work(tp);
}
3749 static int rtl_set_mac_address(struct net_device *dev, void *p)
3751 struct rtl8169_private *tp = netdev_priv(dev);
3752 struct sockaddr *addr = p;
3754 if (!is_valid_ether_addr(addr->sa_data))
3755 return -EADDRNOTAVAIL;
3757 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3759 rtl_rar_set(tp, dev->dev_addr);
3761 return 0;
3764 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3766 struct rtl8169_private *tp = netdev_priv(dev);
3767 struct mii_ioctl_data *data = if_mii(ifr);
3769 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/*
 * MII ioctl handler for chips with an internal (xMII) PHY: report the
 * fixed PHY address and read/write PHY registers on behalf of userspace.
 */
static int rtl_xmii_ioctl(struct rtl8169_private *tp,
                          struct mii_ioctl_data *data, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = 32; /* Internal PHY */
                return 0;

        case SIOCGMIIREG:
                data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
                return 0;

        case SIOCSMIIREG:
                rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
                return 0;
        }
        return -EOPNOTSUPP;
}
/* TBI-attached chips have no MII: all MII ioctls are unsupported. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
        return -EOPNOTSUPP;
}
/* Tear down MSI if it was enabled at probe time, and clear the flag. */
static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
{
        if (tp->features & RTL_FEATURE_MSI) {
                pci_disable_msi(pdev);
                tp->features &= ~RTL_FEATURE_MSI;
        }
}
/*
 * Select the MDIO accessors for this chip generation: the 8168DP
 * variants route PHY access through OCP, the 8168G family uses its own
 * scheme, everything else uses the classic PHYAR register interface.
 */
static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
{
        struct mdio_ops *ops = &tp->mdio_ops;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_27:
                ops->write = r8168dp_1_mdio_write;
                ops->read = r8168dp_1_mdio_read;
                break;
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
                ops->write = r8168dp_2_mdio_write;
                ops->read = r8168dp_2_mdio_read;
                break;
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
                ops->write = r8168g_mdio_write;
                ops->read = r8168g_mdio_read;
                break;
        default:
                ops->write = r8169_mdio_write;
                ops->read = r8169_mdio_read;
                break;
        }
}
/*
 * On the listed chip generations, Wake-on-LAN only works if the
 * receiver keeps accepting broadcast/multicast/unicast frames while
 * suspended, so widen the RX filter accordingly.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_29:
        case RTL_GIGA_MAC_VER_30:
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_34:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_38:
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
                RTL_W32(RxConfig, RTL_R32(RxConfig) |
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
                break;
        default:
                break;
        }
}
/*
 * WoL-aware branch of the PLL power-down paths: if any wake source is
 * armed, leave the PHY powered (clear BMCR) and apply the suspend RX
 * quirk instead of cutting power. Returns true when WoL is in effect
 * and the caller must skip the full power-down.
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
        if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
                return false;

        rtl_writephy(tp, 0x1f, 0x0000);
        rtl_writephy(tp, MII_BMCR, 0x0000);

        rtl_wol_suspend_quirk(tp);

        return true;
}
/* Power the 810x PHY down via the BMCR power-down bit (page 0). */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
        rtl_writephy(tp, 0x1f, 0x0000);
        rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* Power the 810x PHY back up and re-enable autonegotiation. */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
        rtl_writephy(tp, 0x1f, 0x0000);
        rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
/*
 * 810x PLL power-down: skipped entirely when WoL keeps the PHY alive.
 * Otherwise power the PHY down; chips newer than the listed early
 * versions additionally drop PMCH bit 7 to cut PLL power.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        if (rtl_wol_pll_power_down(tp))
                return;

        r810x_phy_power_down(tp);

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
        case RTL_GIGA_MAC_VER_08:
        case RTL_GIGA_MAC_VER_09:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_13:
        case RTL_GIGA_MAC_VER_16:
                break;
        default:
                RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
                break;
        }
}
/*
 * 810x PLL power-up: mirror of r810x_pll_power_down() - bring the PHY
 * up, then restore PMCH bit 7 on the chips that have it.
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        r810x_phy_power_up(tp);

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
        case RTL_GIGA_MAC_VER_08:
        case RTL_GIGA_MAC_VER_09:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_13:
        case RTL_GIGA_MAC_VER_16:
                break;
        default:
                RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
                break;
        }
}
/*
 * 8168 PHY power-up: the older 8168 generations additionally need PHY
 * register 0x0e cleared (undone in r8168_phy_power_down()), then
 * autonegotiation is re-enabled for everyone.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
        rtl_writephy(tp, 0x1f, 0x0000);
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_11:
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_17:
        case RTL_GIGA_MAC_VER_18:
        case RTL_GIGA_MAC_VER_19:
        case RTL_GIGA_MAC_VER_20:
        case RTL_GIGA_MAC_VER_21:
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
                rtl_writephy(tp, 0x0e, 0x0000);
                break;
        default:
                break;
        }
        rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
/*
 * 8168 PHY power-down. The 8168E (VER_32/33) needs ANENABLE kept on
 * together with PDOWN; older generations first set PHY reg 0x0e and
 * then *fall through* to the common BMCR_PDOWN write - the missing
 * break before "default:" is intentional.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
        rtl_writephy(tp, 0x1f, 0x0000);
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
                rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
                break;

        case RTL_GIGA_MAC_VER_11:
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_17:
        case RTL_GIGA_MAC_VER_18:
        case RTL_GIGA_MAC_VER_19:
        case RTL_GIGA_MAC_VER_20:
        case RTL_GIGA_MAC_VER_21:
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
                rtl_writephy(tp, 0x0e, 0x0200);
                /* fall through */
        default:
                rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
                break;
        }
}
/*
 * 8168 PLL power-down. Bail out early when the DASH management agent
 * (8168DP) or ASF (8168C VER_23/24) owns the link, or when WoL must
 * keep the PHY alive. Otherwise power down the PHY and, on the chips
 * that support it, clear PMCH bit 7 to cut PLL power.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
             tp->mac_version == RTL_GIGA_MAC_VER_28 ||
             tp->mac_version == RTL_GIGA_MAC_VER_31) &&
            r8168dp_check_dash(tp)) {
                return;
        }

        if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
             tp->mac_version == RTL_GIGA_MAC_VER_24) &&
            (RTL_R16(CPlusCmd) & ASF)) {
                return;
        }

        if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
            tp->mac_version == RTL_GIGA_MAC_VER_33)
                rtl_ephy_write(tp, 0x19, 0xff64);

        if (rtl_wol_pll_power_down(tp))
                return;

        r8168_phy_power_down(tp);

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
                RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
                break;
        }
}
/*
 * 8168 PLL power-up: restore PMCH bit 7 on the chips that gate the PLL
 * with it, then bring the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
                RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
                break;
        }

        r8168_phy_power_up(tp);
}
/*
 * Invoke an optional per-chip hook. A NULL hook means "nothing to do
 * for this chip" and is silently skipped.
 */
static void rtl_generic_op(struct rtl8169_private *tp,
                           void (*op)(struct rtl8169_private *))
{
        if (!op)
                return;

        op(tp);
}
/* Run the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
        rtl_generic_op(tp, tp->pll_power_ops.down);
}
/* Run the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
        rtl_generic_op(tp, tp->pll_power_ops.up);
}
/*
 * Install the PLL power management hooks for this chip: the 810x family
 * and the 8168 family each get their own pair; chips not listed (e.g.
 * the plain 8169s) get none.
 */
static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
        struct pll_power_ops *ops = &tp->pll_power_ops;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
        case RTL_GIGA_MAC_VER_08:
        case RTL_GIGA_MAC_VER_09:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_16:
        case RTL_GIGA_MAC_VER_29:
        case RTL_GIGA_MAC_VER_30:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
                ops->down = r810x_pll_power_down;
                ops->up = r810x_pll_power_up;
                break;

        case RTL_GIGA_MAC_VER_11:
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_17:
        case RTL_GIGA_MAC_VER_18:
        case RTL_GIGA_MAC_VER_19:
        case RTL_GIGA_MAC_VER_20:
        case RTL_GIGA_MAC_VER_21:
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_34:
        case RTL_GIGA_MAC_VER_35:
        case RTL_GIGA_MAC_VER_36:
        case RTL_GIGA_MAC_VER_38:
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
                ops->down = r8168_pll_power_down;
                ops->up = r8168_pll_power_up;
                break;

        default:
                ops->down = NULL;
                ops->up = NULL;
                break;
        }
}
/*
 * Program the baseline RxConfig for this chip generation: the original
 * 8169/810x parts take a FIFO threshold, the early 8168 and 8168E-VL
 * need the multi-fragment enable bit, and everything else just gets
 * the 128-byte interrupt coalescing enable plus DMA burst size.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_01:
        case RTL_GIGA_MAC_VER_02:
        case RTL_GIGA_MAC_VER_03:
        case RTL_GIGA_MAC_VER_04:
        case RTL_GIGA_MAC_VER_05:
        case RTL_GIGA_MAC_VER_06:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_11:
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_13:
        case RTL_GIGA_MAC_VER_14:
        case RTL_GIGA_MAC_VER_15:
        case RTL_GIGA_MAC_VER_16:
        case RTL_GIGA_MAC_VER_17:
                RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_18:
        case RTL_GIGA_MAC_VER_19:
        case RTL_GIGA_MAC_VER_20:
        case RTL_GIGA_MAC_VER_21:
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
        case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        default:
                RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
                break;
        }
}
/* Reset all TX/RX descriptor ring cursors to the start of the rings. */
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
        tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
/*
 * Enable jumbo frames via the chip-specific hook; the config registers
 * it touches are EEPROM-protected, so unlock around the call.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.enable);
        RTL_W8(Cfg9346, Cfg9346_Lock);
}
/*
 * Disable jumbo frames via the chip-specific hook, under the same
 * Cfg9346 unlock/lock bracket as rtl_hw_jumbo_enable().
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.disable);
        RTL_W8(Cfg9346, Cfg9346_Lock);
}
/*
 * 8168C jumbo enable: set both jumbo bits and shrink the PCIe max read
 * request size (0x2 encoding) to keep DMA latency in check.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
        rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
/*
 * 8168C jumbo disable: clear both jumbo bits and restore the larger
 * PCIe max read request size (0x5 encoding).
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
        rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
/*
 * 8168E jumbo enable: raise the early-TX threshold, set the jumbo bits
 * in Config3/Config4 and shrink the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(MaxTxPacketSize, 0x3f);
        RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) | 0x01);
        rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
/* 8168E jumbo disable: exact inverse of r8168e_hw_jumbo_enable(). */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(MaxTxPacketSize, 0x0c);
        RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
        rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
/*
 * 8168B rev 0 jumbo enable: only a PCIe tuning change (smaller max read
 * request plus no-snoop) - no chip register bits to flip.
 */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
        rtl_tx_performance_tweak(tp->pci_dev,
                (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/* 8168B rev 0 jumbo disable: restore the larger max read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
        rtl_tx_performance_tweak(tp->pci_dev,
                (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/* 8168B rev 1 jumbo enable: rev 0 tweak plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        r8168b_0_hw_jumbo_enable(tp);

        RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
/* 8168B rev 1 jumbo disable: rev 0 tweak plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        r8168b_0_hw_jumbo_disable(tp);

        RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
/*
 * Install the jumbo-frame enable/disable hooks appropriate for this
 * chip generation; chips with no jumbo support (8169, 810x, 8168G)
 * keep NULL hooks, which rtl_generic_op() turns into a no-op.
 */
static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
        struct jumbo_ops *ops = &tp->jumbo_ops;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_11:
                ops->disable = r8168b_0_hw_jumbo_disable;
                ops->enable = r8168b_0_hw_jumbo_enable;
                break;
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_17:
                ops->disable = r8168b_1_hw_jumbo_disable;
                ops->enable = r8168b_1_hw_jumbo_enable;
                break;
        case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
        case RTL_GIGA_MAC_VER_19:
        case RTL_GIGA_MAC_VER_20:
        case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
                ops->disable = r8168c_hw_jumbo_disable;
                ops->enable = r8168c_hw_jumbo_enable;
                break;
        case RTL_GIGA_MAC_VER_27:
        case RTL_GIGA_MAC_VER_28:
                ops->disable = r8168dp_hw_jumbo_disable;
                ops->enable = r8168dp_hw_jumbo_enable;
                break;
        case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_34:
                ops->disable = r8168e_hw_jumbo_disable;
                ops->enable = r8168e_hw_jumbo_enable;
                break;

        /*
         * No action needed for jumbo frames with 8169.
         * No jumbo for 810x at all.
         */
        case RTL_GIGA_MAC_VER_40:
        case RTL_GIGA_MAC_VER_41:
        default:
                ops->disable = NULL;
                ops->enable = NULL;
                break;
        }
}
/* Poll condition: true while the chip-level software reset is pending. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
        void __iomem *ioaddr = tp->mmio_addr;

        return RTL_R8(ChipCmd) & CmdReset;
}
/*
 * Issue a chip software reset and wait (100 us steps, up to 100 tries)
 * for the hardware to clear the CmdReset bit.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W8(ChipCmd, CmdReset);

        rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
/*
 * Load and validate the PHY firmware patch for this chip, caching the
 * result in tp->rtl_fw. Failure is non-fatal: a warning is logged and
 * tp->rtl_fw is left NULL so the driver runs without the patch.
 * Cleanup on error uses the usual goto ladder, releasing only what was
 * acquired.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
        struct rtl_fw *rtl_fw;
        const char *name;
        int rc = -ENOMEM;

        /* NULL means this chip needs no firmware patch at all. */
        name = rtl_lookup_firmware_name(tp);
        if (!name)
                goto out_no_firmware;

        rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
        if (!rtl_fw)
                goto err_warn;

        rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
        if (rc < 0)
                goto err_free;

        rc = rtl_check_firmware(tp, rtl_fw);
        if (rc < 0)
                goto err_release_firmware;

        tp->rtl_fw = rtl_fw;
out:
        return;

err_release_firmware:
        release_firmware(rtl_fw->fw);
err_free:
        kfree(rtl_fw);
err_warn:
        netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
                   name, rc);
out_no_firmware:
        tp->rtl_fw = NULL;
        goto out;
}
/*
 * Fetch the firmware once: tp->rtl_fw holds an ERR_PTR sentinel until
 * the first load attempt, a real pointer or NULL afterwards.
 */
static void rtl_request_firmware(struct rtl8169_private *tp)
{
        if (IS_ERR(tp->rtl_fw))
                rtl_request_uncached_firmware(tp);
}
/* Stop packet reception by clearing all "accept" bits in RxConfig. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
/* Poll condition: true while the normal-priority TX queue is polling. */
DECLARE_RTL_COND(rtl_npq_cond)
{
        void __iomem *ioaddr = tp->mmio_addr;

        return RTL_R8(TxPoll) & NPQ;
}
/* Poll condition: true once the transmitter reports its FIFO empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
        void __iomem *ioaddr = tp->mmio_addr;

        return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
/*
 * Quiesce the NIC before a software reset: mask interrupts, close the
 * receiver, then drain the transmitter using whichever mechanism this
 * chip generation provides (NPQ poll on 8168DP, TXCFG_EMPTY poll on the
 * newer 8168E/F/G parts, a fixed delay otherwise), and finally issue
 * the reset itself.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        /* Disable interrupts */
        rtl8169_irq_mask_and_ack(tp);

        rtl_rx_close(tp);

        if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
            tp->mac_version == RTL_GIGA_MAC_VER_28 ||
            tp->mac_version == RTL_GIGA_MAC_VER_31) {
                rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_38) {
                RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
        } else {
                RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                udelay(100);
        }

        rtl_hw_reset(tp);
}
/* Program TxConfig with the DMA burst size and inter-frame gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        /* Set DMA burst size and Interframe Gap Time */
        RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
                (InterFrameGap << TxInterFrameGapShift));
}
/*
 * Run the chip-specific hardware bring-up, then unmask interrupts -
 * only after the chip is fully configured.
 */
static void rtl_hw_start(struct net_device *dev)
{
        struct rtl8169_private *tp = netdev_priv(dev);

        tp->hw_start(dev);

        rtl_irq_enable_all(tp);
}
/*
 * Point the chip at the TX/RX descriptor rings. The high halves are
 * written first on purpose - see the quirk note below.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
                                         void __iomem *ioaddr)
{
        /*
         * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
         * register to be written before TxDescAddrLow to work.
         * Switching from MMIO to I/O access fixes the issue as well.
         */
        RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
        RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
        RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
        RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
/*
 * Read CPlusCmd and immediately write the same value back (posts the
 * register); returns the value read.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
        u16 cmd;

        cmd = RTL_R16(CPlusCmd);
        RTL_W16(CPlusCmd, cmd);
        return cmd;
}
/*
 * Set the RX size filter just above the buffer size, effectively
 * disabling it (an exact limit was found to hurt - see comment).
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
        /* Low hurts. Let's disable the filtering. */
        RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
/*
 * Write an undocumented tuning value to register 0x7c on 8110SCd/SCe
 * (VER_05/06); the value depends on the sensed PCI bus clock. Other
 * chip versions match no table entry and are left untouched.
 */
static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
{
        static const struct rtl_cfg2_info {
                u32 mac_version;
                u32 clk;
                u32 val;
        } cfg2_info [] = {
                { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
                { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
                { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
                { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
        };
        const struct rtl_cfg2_info *p = cfg2_info;
        unsigned int i;
        u32 clk;

        clk = RTL_R8(Config2) & PCI_Clock_66MHz;
        for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
                if ((p->mac_version == mac_version) && (p->clk == clk)) {
                        RTL_W32(0x7c, p->val);
                        break;
                }
        }
}
/*
 * ndo_set_rx_mode hook: translate the net_device flags and multicast
 * list into RxConfig accept bits and the 64-bit multicast hash filter
 * (MAR0/MAR4). Chips newer than VER_06 store the hash words swapped.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;
        u32 tmp = 0;

        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;

                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        /* Top 6 CRC bits index the 64-bit hash filter. */
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        if (dev->features & NETIF_F_RXALL)
                rx_mode |= (AcceptErr | AcceptRunt);

        tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

        if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
                u32 data = mc_filter[0];

                mc_filter[0] = swab32(mc_filter[1]);
                mc_filter[1] = swab32(data);
        }

        RTL_W32(MAR0 + 4, mc_filter[1]);
        RTL_W32(MAR0 + 0, mc_filter[0]);

        RTL_W32(RxConfig, tmp);
}
/*
 * Hardware bring-up for the original 8169/8110 family. The sequence -
 * PCI quirks, Cfg9346 unlock, RX config, C+ command setup, descriptor
 * ring registers, TX/RX enable - follows the order the hardware
 * requires; note the early TX/RX enable quirk for VER_01..04.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;

        if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
                RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
                pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
        }

        RTL_W8(Cfg9346, Cfg9346_Unlock);

        /* Early chips must have TX/RX enabled before further config. */
        if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
            tp->mac_version == RTL_GIGA_MAC_VER_02 ||
            tp->mac_version == RTL_GIGA_MAC_VER_03 ||
            tp->mac_version == RTL_GIGA_MAC_VER_04)
                RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

        rtl_init_rxcfg(tp);

        RTL_W8(EarlyTxThres, NoEarlyTx);

        rtl_set_rx_max_size(ioaddr, rx_buf_sz);

        if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
            tp->mac_version == RTL_GIGA_MAC_VER_02 ||
            tp->mac_version == RTL_GIGA_MAC_VER_03 ||
            tp->mac_version == RTL_GIGA_MAC_VER_04)
                rtl_set_rx_tx_config_registers(tp);

        tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

        if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
            tp->mac_version == RTL_GIGA_MAC_VER_03) {
                dprintk("Set MAC Reg C+CR Offset 0xE0. "
                        "Bit-3 and bit-14 MUST be 1\n");
                tp->cp_cmd |= (1 << 14);
        }

        RTL_W16(CPlusCmd, tp->cp_cmd);

        rtl8169_set_magic_reg(ioaddr, tp->mac_version);

        /*
         * Undocumented corner. Supposedly:
         * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
         */
        RTL_W16(IntrMitigate, 0x0000);

        rtl_set_rx_tx_desc_registers(tp, ioaddr);

        if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
            tp->mac_version != RTL_GIGA_MAC_VER_02 &&
            tp->mac_version != RTL_GIGA_MAC_VER_03 &&
            tp->mac_version != RTL_GIGA_MAC_VER_04) {
                RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
                rtl_set_rx_tx_config_registers(tp);
        }

        RTL_W8(Cfg9346, Cfg9346_Lock);

        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(IntrMask);

        RTL_W32(RxMissed, 0);

        rtl_set_rx_mode(dev);

        /* no early-rx interrupts */
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4605 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4607 if (tp->csi_ops.write)
4608 tp->csi_ops.write(tp, addr, value);
4611 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4613 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/*
 * Read-modify-write CSI register 0x070c: keep the low 24 bits, replace
 * the top byte with the caller-supplied access-enable bits.
 */
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
{
        u32 csi;

        csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
        rtl_csi_write(tp, 0x070c, csi | bits);
}
/* CSI access enable, variant 1 (top byte 0x17) - used by 8168DP. */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
        rtl_csi_access_enable(tp, 0x17000000);
}
/* CSI access enable, variant 2 (top byte 0x27) - used by most 8168s. */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
        rtl_csi_access_enable(tp, 0x27000000);
}
/* Poll condition: CSIAR busy/done flag for CSI transactions. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
        void __iomem *ioaddr = tp->mmio_addr;

        return RTL_R32(CSIAR) & CSIAR_FLAG;
}
/*
 * Standard CSI write: load CSIDR with the value, kick the transaction
 * via CSIAR and poll until the flag drops (10 us steps, 100 tries).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W32(CSIDR, value);
        RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
                CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

        rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
/*
 * Standard CSI read: start the transaction via CSIAR, poll for
 * completion and return CSIDR, or all-ones on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
                CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

        return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
                RTL_R32(CSIDR) : ~0;
}
/*
 * CSI write for RTL8402/8411: same as the standard write but with the
 * NIC function select bit (CSIAR_FUNC_NIC) added to the command.
 */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W32(CSIDR, value);
        RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
                CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
                CSIAR_FUNC_NIC);

        rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
/*
 * CSI read for RTL8402/8411: standard read plus the NIC function
 * select bit; returns all-ones on timeout.
 */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
        void __iomem *ioaddr = tp->mmio_addr;

        RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
                CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

        return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
                RTL_R32(CSIDR) : ~0;
}
/*
 * Install the CSI accessors: none for pre-PCIe chips, the 8402 variant
 * (with function select) for VER_37/38, the standard variant otherwise.
 */
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
{
        struct csi_ops *ops = &tp->csi_ops;

        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_01:
        case RTL_GIGA_MAC_VER_02:
        case RTL_GIGA_MAC_VER_03:
        case RTL_GIGA_MAC_VER_04:
        case RTL_GIGA_MAC_VER_05:
        case RTL_GIGA_MAC_VER_06:
        case RTL_GIGA_MAC_VER_10:
        case RTL_GIGA_MAC_VER_11:
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_13:
        case RTL_GIGA_MAC_VER_14:
        case RTL_GIGA_MAC_VER_15:
        case RTL_GIGA_MAC_VER_16:
        case RTL_GIGA_MAC_VER_17:
                ops->write = NULL;
                ops->read = NULL;
                break;

        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_38:
                ops->write = r8402_csi_write;
                ops->read = r8402_csi_read;
                break;

        default:
                ops->write = r8169_csi_write;
                ops->read = r8169_csi_read;
                break;
        }
}
/*
 * One EPHY register fixup: at register 'offset', clear the bits in
 * 'mask' and set the bits in 'bits' (applied by rtl_ephy_init()).
 */
struct ephy_info {
        unsigned int offset;
        u16 mask;
        u16 bits;
};
4728 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4729 int len)
4731 u16 w;
4733 while (len-- > 0) {
4734 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4735 rtl_ephy_write(tp, e->offset, w);
4736 e++;
/*
 * Clear the CLKREQ enable bit in the PCIe Link Control register, if
 * the device has a PCIe capability at all.
 */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
        int cap = pci_pcie_cap(pdev);

        if (cap) {
                u16 ctl;

                pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
                ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
                pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
        }
}
/*
 * Set the CLKREQ enable bit in the PCIe Link Control register -
 * inverse of rtl_disable_clock_request().
 */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
        int cap = pci_pcie_cap(pdev);

        if (cap) {
                u16 ctl;

                pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
                ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
                pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
        }
}
/* CPlusCmd debug/test bits that must be cleared on 8168 bring-up. */
#define R8168_CPCMD_QUIRK_MASK (\
        EnableBist | \
        Mac_dbgo_oe | \
        Force_half_dup | \
        Force_rxflow_en | \
        Force_txflow_en | \
        Cxpl_dbg_sel | \
        ASF | \
        PktCntrDisable | \
        Mac_dbgo_sel)
/*
 * 8168B rev b bring-up: disable the beacon, clear the CPlusCmd quirk
 * bits and apply the PCIe TX tuning (with no-snoop).
 */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;

        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

        rtl_tx_performance_tweak(pdev,
                (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/*
 * 8168B rev e/f bring-up: rev b setup, plus the early-TX threshold and
 * clearing Config4 bit 0.
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;

        rtl_hw_start_8168bb(tp);

        RTL_W8(MaxTxPacketSize, TxPacketMax);

        RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
/*
 * Common tail of the 8168C/CP bring-up paths: allow link speed-down
 * power saving, disable the beacon, apply PCIe TX tuning, drop CLKREQ
 * and clear the CPlusCmd quirk bits.
 */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;

        RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

        rtl_disable_clock_request(pdev);

        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/*
 * 8168CP rev 1 bring-up: enable CSI access, apply the EPHY fixup table
 * and run the common 8168C/CP tail.
 */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
        static const struct ephy_info e_info_8168cp[] = {
                { 0x01, 0,      0x0001 },
                { 0x02, 0x0800, 0x1000 },
                { 0x03, 0,      0x0042 },
                { 0x06, 0x0080, 0x0000 },
                { 0x07, 0,      0x2000 }
        };

        rtl_csi_access_enable_2(tp);

        rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

        __rtl_hw_start_8168cp(tp);
}
/*
 * 8168CP rev 2 bring-up: CSI enable plus a reduced register setup -
 * no EPHY fixups and no Speed_down/clock-request handling.
 */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;

        rtl_csi_access_enable_2(tp);

        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
/*
 * 8168CP rev 3 bring-up: like rev 2 but with an undocumented DBG_REG
 * write and an explicit early-TX threshold.
 */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;

        rtl_csi_access_enable_2(tp);

        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

        /* Magic. */
        RTL_W8(DBG_REG, 0x20);

        RTL_W8(MaxTxPacketSize, TxPacketMax);

        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	/* NAK workarounds plus undocumented debug bits. */
	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
/* 8168C rev 3 uses the exact same init sequence as rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
/* 8168C rev 4 needs no EPHY fixups, only the common CP init. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
4941 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4943 void __iomem *ioaddr = tp->mmio_addr;
4944 struct pci_dev *pdev = tp->pci_dev;
4945 static const struct ephy_info e_info_8168d_4[] = {
4946 { 0x0b, ~0, 0x48 },
4947 { 0x19, 0x20, 0x50 },
4948 { 0x0c, ~0, 0x20 }
4950 int i;
4952 rtl_csi_access_enable_1(tp);
4954 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4956 RTL_W8(MaxTxPacketSize, TxPacketMax);
4958 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4959 const struct ephy_info *e = e_info_8168d_4 + i;
4960 u16 w;
4962 w = rtl_ephy_read(tp, e->offset);
4963 rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
4966 rtl_enable_clock_request(pdev);
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer (pulse TXPLA_RST high then low). */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI (extended register interface) tuning; values are
	 * vendor-provided magic for this chip revision.
	 */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
/* Common init for the 8168F family; callers add their EPHY fixups on top. */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI tuning; vendor-provided magic for this chip revision. */
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
/* 8411 shares the 8168F base init but uses its own EPHY table and a
 * different 0x0d4 ERI write (0x0000 clear mask vs 0xff00 on 8168f_1).
 */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x1e, 0x0000,	0x4000 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* ERI tuning; vendor-provided magic for this chip revision. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
/* Bring up an 8168-family chip: unlock config registers, program the
 * common Tx/Rx setup, then dispatch to the per-revision init routine.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* NOTE(review): |= with RTL_R16 folds the current CPlusCmd contents
	 * into the cached cp_cmd as well - looks intentional but verify
	 * against later upstream history before "simplifying".
	 */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_mode(dev);

	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));

	RTL_R8(IntrMask);	/* read flushes posted MMIO writes */

	/* Per-revision quirks and tuning. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
		       dev->name, tp->mac_version);
		break;
	}

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
/* CPlusCmd bits that must be cleared on the 810x (fast ethernet) family. */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck set, drop LEDS0 (hardware quirk). */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
/* 8102E rev 3: rev 2 init plus one extra EPHY write (value 0xc2f9,
 * same as the rev 1 table entry for register 0x03).
 */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
/* 8105E rev 2: rev 1 init plus setting bit 15 of EPHY register 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY fixups: { offset, mask of bits to clear, bits to set }. */
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* ERI tuning; vendor-provided magic for this chip revision. */
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
/* Bring up an 8101/810x-family (fast ethernet) chip: per-revision quirks,
 * then the common Tx/Rx setup. Mirrors rtl_hw_start_8168 for the FE line.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer FE chips do not deliver a usable RxFIFOOver event. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	/* Disable PCIe snooping on chips where it misbehaves. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
		int cap = pci_pcie_cap(pdev);

		if (cap) {
			pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
					      PCI_EXP_DEVCTL_NOSNOOP_EN);
		}
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_R8(IntrMask);	/* read flushes posted MMIO writes */

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5471 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5473 struct rtl8169_private *tp = netdev_priv(dev);
5475 if (new_mtu < ETH_ZLEN ||
5476 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5477 return -EINVAL;
5479 if (new_mtu > ETH_DATA_LEN)
5480 rtl_hw_jumbo_enable(tp);
5481 else
5482 rtl_hw_jumbo_disable(tp);
5484 dev->mtu = new_mtu;
5485 netdev_update_features(dev);
5487 return 0;
/* Poison an Rx descriptor so the NIC cannot use it: plant a recognizable
 * bogus DMA address and clear the ownership/size bits in opts1.
 */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
/* Unmap, free and NULL one Rx buffer, then poison its descriptor. */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
/* Hand an Rx descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the RingEnd marker.
 */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
/* Publish a freshly mapped buffer to the NIC. The wmb() orders the
 * address store before the DescOwn handoff in rtl8169_mark_to_asic().
 */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
/* Round a buffer pointer up to the next 16-byte boundary (the Rx DMA
 * alignment the hardware requires).
 */
static inline void *rtl8169_align(void *data)
{
	unsigned long addr = (unsigned long)data;

	return (void *)((addr + 15UL) & ~15UL);
}
/* Allocate and DMA-map one Rx buffer on the device's NUMA node, retrying
 * with 15 bytes of slack if the first allocation is not 16-byte aligned.
 * Returns the raw buffer pointer, or NULL on failure.
 *
 * NOTE(review): the declared return type is struct sk_buff * but the
 * function returns the kmalloc'ed void buffer (implicitly converted);
 * the caller stores it in a void *. Harmless in C but misleading -
 * consider changing the return type to void * to match reality.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* Reallocate with slack so rtl8169_align() stays inside the buffer. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
/* Release every allocated Rx buffer and poison its descriptor. */
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (tp->Rx_databuff[i]) {
			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
						 tp->RxDescArray + i);
		}
	}
}
/* Flag the descriptor as the end of the ring so the NIC wraps around. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
/* Populate every empty Rx ring slot with a fresh buffer; on any
 * allocation failure release everything and return -ENOMEM.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already has a buffer (refill after partial clear). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
/* Reset ring indexes, zero the Tx/Rx bookkeeping arrays and fill the
 * Rx ring. Returns 0 or -ENOMEM.
 */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
/* Unmap one Tx buffer and scrub both the descriptor and its ring_info
 * entry (skb pointer is the caller's responsibility).
 */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
/* Drop n consecutive Tx ring entries starting at 'start': unmap each
 * mapped buffer and free the skb (counted as tx_dropped) on the entry
 * that owns it.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5655 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5657 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5658 tp->cur_tx = tp->dirty_tx = 0;
/* Full device reset from process context: quiesce NAPI and the Tx queue,
 * reset the chip, recycle the rings, then restart and re-check the link.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	synchronize_sched();	/* wait out any in-flight softirq users */

	rtl8169_hw_reset(tp);

	/* Give every Rx buffer back to the NIC (buffers are kept). */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
/* ndo_tx_timeout: defer a full reset to the driver workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
/* Map and queue the paged fragments of an skb into the Tx ring, starting
 * one slot after tp->cur_tx. Returns the number of fragments queued, or
 * -EIO after unwinding on a DMA mapping failure. The last fragment gets
 * LastFrag and owns the skb pointer for later release in rtl_tx().
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* Last fragment carries the skb and the LastFrag marker. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragments queued so far (head is slot cur_tx). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
/* Fill the TSO / checksum-offload bits of the Tx descriptor options,
 * using the per-txd-version layout from tx_desc_info.
 *
 * NOTE(review): for CHECKSUM_PARTIAL packets that are neither TCP nor
 * UDP this only WARNs once and still transmits without hardware
 * checksum - verify that the features mask makes this unreachable.
 */
static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	}
}
/* ndo_start_xmit: map the linear head, queue any fragments, publish the
 * descriptors to the NIC and kick TxPoll. Ownership of the head
 * descriptor (DescOwn) is written last, after a wmb(), so the NIC never
 * sees a half-built chain.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	/* NOTE(review): opts[1] already holds a cpu_to_le32() value here
	 * and is converted again when stored below - a double swap on
	 * big-endian. Matches the code as shipped; verify before changing.
	 */
	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
	opts[0] = DescOwn;

	rtl8169_tso_csum(tp, skb, opts);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
/* Handle a PCI bus error signalled by the chip: log the PCI status,
 * re-arm error reporting, clear sticky status bits, optionally drop the
 * 64-bit DAC mode, then reset the device via the workqueue.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-one-to-clear the sticky error bits in PCI_STATUS. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
/* Reclaim completed Tx descriptors: walk from dirty_tx to cur_tx, stop at
 * the first descriptor the NIC still owns, unmap buffers, account stats
 * and free skbs on LastFrag entries. Pairs its barriers with
 * rtl8169_start_xmit (see the comments there).
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();	/* read opts1 after observing cur_tx */
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
5969 static inline int rtl8169_fragmented_frame(u32 status)
5971 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/* Accept the hardware checksum result only for TCP/UDP frames whose
 * respective failure bit is clear; otherwise leave the skb unchecked.
 */
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
	u32 status = opts1 & RxProtoMask;

	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* Copy a received frame out of the DMA buffer into a fresh skb (copybreak
 * style: the ring buffer itself is always reused). Returns NULL if skb
 * allocation fails. The sync_for_cpu/sync_for_device pair brackets the
 * CPU's read of the still-mapped buffer.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
/* NAPI Rx: process up to 'budget' descriptors, copying good frames into
 * new skbs and recycling the ring buffers. Returns the number of
 * descriptors consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = min(rx_left, budget);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		rmb();	/* read opts1 before the rest of the descriptor */
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* RXALL: deliver runt/bad-CRC frames anyway. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;

			rtl8169_mark_to_asic(desc, rx_buf_sz);
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				rtl8169_mark_to_asic(desc, rx_buf_sz);
				continue;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			rtl8169_mark_to_asic(desc, rx_buf_sz);
			if (!skb) {
				dev->stats.rx_dropped++;
				continue;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}

		/* Work around for AMD platform: skip the next descriptor. */
		if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
			desc->opts2 = 0;
			cur_rx++;
		}
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	tp->dirty_rx += count;

	return count;
}
/*
 * rtl8169_interrupt - hard IRQ handler.
 *
 * Reads the event register; 0xffff indicates the device is gone (e.g. hot
 * unplug / PCI read of all-ones) and is treated as "not ours".  On a real
 * event, interrupts are masked and all further processing is deferred to
 * NAPI (rtl8169_poll).
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		/* Only react to events we actually service. */
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
/*
 * Workqueue context.
 *
 * Handles the rare/slow events (fifo overflow, PCI error, link change)
 * that rtl8169_poll deferred via RTL_FLAG_TASK_SLOW_PENDING, then
 * re-enables the full interrupt mask.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fallthrough */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
/*
 * rtl_task - single workqueue entry point dispatching pending deferred work.
 *
 * Runs each flagged action under the work mutex.  Nothing is executed if
 * the interface is down or RTL_FLAG_TASK_ENABLED was cleared (close /
 * suspend paths), which guarantees deferred work cannot race teardown.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
/*
 * rtl8169_poll - NAPI poll callback.
 *
 * Acks and services fast events (Rx/Tx) inline; slow events are left
 * un-acked, masked out of the re-enable mask and punted to rtl_task so
 * they cannot storm.  Interrupts are re-enabled only when the Rx budget
 * was not exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Ack fast events only; slow ones are acked in rtl_slow_event_work. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		/* Flush the posted MMIO write before hard IRQs may fire. */
		mmiowb();
	}

	return work_done;
}
/*
 * rtl8169_rx_missed - fold the hardware RxMissed counter into netdev stats.
 *
 * Only the early (8169-class, <= VER_06) chips expose this register; the
 * counter is read-and-clear from the driver's point of view since we zero
 * it after accumulating.
 */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
/*
 * rtl8169_down - bring the hardware down (close/teardown path).
 *
 * Caller holds the work mutex and has cleared RTL_FLAG_TASK_ENABLED.
 * Order matters: stop the timer and NAPI, reset the chip, harvest final
 * stats, wait out any in-flight xmit, then free ring contents.
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
/*
 * rtl8169_close - ndo_stop: shut the interface down and release resources.
 *
 * Takes a runtime-PM reference so the device is awake for the register
 * accesses, disables deferred work before rtl8169_down(), then frees the
 * IRQ and both descriptor rings.
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	/* NULL rings are the "closed" marker tested by runtime PM hooks. */
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * rtl8169_netpoll - netconsole/netpoll hook: run the IRQ handler directly
 * with interrupts unavailable.
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
#endif
/*
 * rtl_open - ndo_open: allocate rings, request the IRQ and start the NIC.
 *
 * Error handling uses the classic goto-unwind chain; each label undoes
 * exactly what was set up before the failing step.  Returns 0 or a
 * negative errno.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	/* Shared line only when MSI could not be enabled. */
	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
/*
 * rtl8169_get_stats64 - ndo_get_stats64: fill @stats from soft counters.
 *
 * Packet/byte counts are protected by u64_stats seqcounts (retry loops
 * guard against a concurrent writer on 32-bit); the remaining error
 * counters are copied straight from dev->stats.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Only touch hardware while the interface is up. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
/*
 * rtl8169_net_suspend - common quiesce path for system/runtime suspend and
 * shutdown.  No-op if the interface is down.  Detaches the netdev, stops
 * NAPI and deferred work under the work mutex, then powers the PLL down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Block rtl_task from running any queued actions. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6432 #ifdef CONFIG_PM
6434 static int rtl8169_suspend(struct device *device)
6436 struct pci_dev *pdev = to_pci_dev(device);
6437 struct net_device *dev = pci_get_drvdata(pdev);
6439 rtl8169_net_suspend(dev);
6441 return 0;
/*
 * __rtl8169_resume - common resume tail for system and runtime resume.
 *
 * Re-attaches the netdev, powers the PLL up, re-enables NAPI/deferred
 * work, and schedules a chip reset to restore operating state.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* Full re-init happens asynchronously via rtl_reset_work(). */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
/* System-sleep resume callback: re-init the PHY, then resume if it was up. */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
/*
 * rtl8169_runtime_suspend - runtime-PM suspend.
 *
 * A NULL TxDescArray means the interface is closed, so there is nothing
 * to do.  Otherwise save the current WoL configuration, arm wake-on-any,
 * and quiesce the interface.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
/*
 * rtl8169_runtime_resume - runtime-PM resume; mirror of runtime_suspend.
 *
 * Restores the WoL options saved at runtime_suspend time, re-inits the
 * PHY and brings the interface back up.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6514 static int rtl8169_runtime_idle(struct device *device)
6516 struct pci_dev *pdev = to_pci_dev(device);
6517 struct net_device *dev = pci_get_drvdata(pdev);
6518 struct rtl8169_private *tp = netdev_priv(dev);
6520 return tp->TxDescArray ? -EBUSY : 0;
/* Power-management callbacks: suspend/resume double as freeze/thaw and
 * poweroff/restore for hibernation. */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};

#define RTL8169_PM_OPS	(&rtl8169_pm_ops)

#else /* !CONFIG_PM */

#define RTL8169_PM_OPS	NULL

#endif /* !CONFIG_PM */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		/* Stop bus-mastering but keep the receiver running so
		 * wake packets can still be matched. */
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
/*
 * rtl_shutdown - PCI shutdown hook (reboot/poweroff).
 *
 * Quiesces the NIC and, on power-off with WoL armed, applies the WoL
 * quirks and drops the device into D3hot with wake enabled.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
/*
 * rtl_remove_one - PCI remove hook; undoes rtl_init_one.
 *
 * Stops the embedded firmware agent on the chips that have one
 * (VER_27/28/31), flushes deferred work, unregisters the netdev and
 * releases MSI/MMIO/board resources.
 */
static void __devexit rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	cancel_work_sync(&tp->wk.work);

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Balance the probe-time pm_runtime_put_noidle(). */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
	pci_set_drvdata(pdev, NULL);
}
/* net_device operations table wired up in rtl_init_one(). */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif
};
/*
 * Per-family configuration, indexed by the driver_data of the PCI id
 * table entry: hw start routine, BAR to map, Rx alignment, the slow-event
 * interrupt mask and the fallback mac_version when detection fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
/* Cfg9346_Unlock assumed. */
/*
 * rtl_try_msi - enable MSI when the config allows it; fall back to INTx.
 *
 * Returns RTL_FEATURE_MSI on success, 0 otherwise, so the caller can OR
 * the result into tp->features.  The MSIEnable bit in Config2 is only
 * written on the early (<= VER_06) chips.
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
/* Poll helper: MCU reports the link list ready (used by 8168g init). */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
/* Poll helper: both Rx and Tx FIFOs report empty in the MCU register. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
/*
 * rtl_hw_init_8168g - one-time probe init for the 8168g family.
 *
 * Gates RXDV, waits for the tx-config/rx-tx-empty conditions, stops the
 * DMA engines and takes the MCU out of OOB mode, then toggles bits 14/15
 * of mac-ocp register 0xe8de around link-list-ready waits.
 * NOTE(review): the 0xe8de bit semantics are undocumented vendor magic —
 * sequence taken as-is; do not reorder.
 */
static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
/* Probe-time hardware init dispatch; currently only 8168g needs work. */
static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_init_8168g(tp);
		break;

	default:
		break;
	}
}
/*
 * rtl_init_one - PCI probe: detect the chip, map its registers and
 * register the net device.
 *
 * Rough sequence: allocate the netdev, enable/map the PCI device, set
 * the DMA mask, identify the mac_version, configure WoL/MSI features,
 * install TBI or MII accessors, read the MAC address and register.
 * Failures unwind through the goto chain at the bottom.
 */
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Hook the generic MII helpers up to our MDIO accessors. */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = RxChkSum;

	/* Prefer 64-bit DMA when available and requested via use_dac. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	rtl_init_rxcfg(tp);

	rtl_irq_disable(tp);

	rtl_hw_initialize(tp);

	rtl_hw_reset(tp);

	/* Clear any stale events before enabling bus mastering. */
	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		tp->cp_cmd |= RxVlan;

	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);
	rtl_init_csi_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Probe WoL capability and try MSI under the config unlock. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Select TBI (fiber) or xMII (copper) accessor set. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);

	/* Get MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	/* VER_01 keeps the overflow status bits; later chips mask them. */
	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_msi_4;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	/* Chips with an embedded firmware agent need it started. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

err_out_msi_4:
	netif_napi_del(&tp->napi);
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
/* PCI driver glue tying the id table to probe/remove/shutdown/PM. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= __devexit_p(rtl_remove_one),
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};
/* Module entry: register with the PCI core; probe runs per matching device. */
static int __init rtl8169_init_module(void)
{
	return pci_register_driver(&rtl8169_pci_driver);
}
/* Module exit: unregister; the PCI core invokes rtl_remove_one per device. */
static void __exit rtl8169_cleanup_module(void)
{
	pci_unregister_driver(&rtl8169_pci_driver);
}

module_init(rtl8169_init_module);
module_exit(rtl8169_cleanup_module);