/*******************************************************************************

  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for e1000 */

#include "e1000.h"

#include <asm/uaccess.h>
extern char e1000_driver_name[];
extern char e1000_driver_version[];

extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
		      offsetof(struct e1000_adapter, m)
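/* E1000_STAT(m) expands to two initializers: the size of member m inside
 * struct e1000_adapter and its byte offset.  e1000_get_ethtool_stats() below
 * walks this table, adds the offset to the adapter pointer and copies either
 * 32 or 64 bits depending on the recorded size, which is what userspace sees
 * from "ethtool -S ethX".
 */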
static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_packets", E1000_STAT(net_stats.rx_packets) },
	{ "tx_packets", E1000_STAT(net_stats.tx_packets) },
	{ "rx_bytes", E1000_STAT(net_stats.rx_bytes) },
	{ "tx_bytes", E1000_STAT(net_stats.tx_bytes) },
	{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
	{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
	{ "rx_dropped", E1000_STAT(net_stats.rx_dropped) },
	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
	{ "multicast", E1000_STAT(net_stats.multicast) },
	{ "collisions", E1000_STAT(net_stats.collisions) },
	{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
	{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
	{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
	{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
	{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
	{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
	{ "tx_window_errors", E1000_STAT(net_stats.tx_window_errors) },
	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
};

#define E1000_STATS_LEN	\
	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};

#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
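/* The order of the test strings above must match the data[] slots filled in
 * by e1000_diag_test(): 0 = register, 1 = eeprom, 2 = interrupt, 3 = loopback,
 * 4 = link.  "ethtool -t ethX offline" runs all five; the online variant only
 * runs the link test.
 */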
static int
e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if(hw->media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
		                   SUPPORTED_10baseT_Full |
		                   SUPPORTED_100baseT_Half |
		                   SUPPORTED_100baseT_Full |
		                   SUPPORTED_1000baseT_Full |
		                   SUPPORTED_Autoneg |
		                   SUPPORTED_TP);

		ecmd->advertising = ADVERTISED_TP;

		if(hw->autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;

			/* the e1000 autoneg seems to match ethtool nicely */

			ecmd->advertising |= hw->autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;

		if(hw->mac_type == e1000_82543)
			ecmd->transceiver = XCVR_EXTERNAL;
		else
			ecmd->transceiver = XCVR_INTERNAL;

	} else {
		ecmd->supported   = (SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE |
				     SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;

		if(hw->mac_type >= e1000_82545)
			ecmd->transceiver = XCVR_INTERNAL;
		else
			ecmd->transceiver = XCVR_EXTERNAL;
	}

	if(netif_carrier_ok(adapter->netdev)) {

		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		                           &adapter->link_duplex);
		ecmd->speed = adapter->link_speed;

		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
		 *           and HALF_DUPLEX != DUPLEX_HALF */

		if(adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
			 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}
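/* When autoneg is disabled, e1000_set_settings() below hands the requested
 * mode to e1000_set_spd_dplx() as the sum ecmd->speed + ecmd->duplex.  With
 * the ethtool constants (SPEED_10/100/1000, DUPLEX_HALF = 0, DUPLEX_FULL = 1)
 * each forced mode maps to a distinct code, e.g. 100 + 1 for 100 Mb/s full
 * duplex; the helper in e1000_main.c is expected to switch on these combined
 * values.
 */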
static int
e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if(ecmd->autoneg == AUTONEG_ENABLE) {
		hw->autoneg = 1;
		if(hw->media_type == e1000_media_type_fiber)
			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
						 ADVERTISED_FIBRE |
						 ADVERTISED_Autoneg;
		else
			hw->autoneg_advertised = ADVERTISED_10baseT_Half |
						 ADVERTISED_10baseT_Full |
						 ADVERTISED_100baseT_Half |
						 ADVERTISED_100baseT_Full |
						 ADVERTISED_1000baseT_Full |
						 ADVERTISED_Autoneg |
						 ADVERTISED_TP;
		ecmd->advertising = hw->autoneg_advertised;
	} else
		if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
			return -EINVAL;

	/* reset the link */

	if(netif_running(adapter->netdev)) {
		e1000_down(adapter);
		e1000_reset(adapter);
		e1000_up(adapter);
	} else
		e1000_reset(adapter);

	return 0;
}
static void
e1000_get_pauseparam(struct net_device *netdev,
                     struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if(hw->fc == e1000_fc_rx_pause)
		pause->rx_pause = 1;
	else if(hw->fc == e1000_fc_tx_pause)
		pause->tx_pause = 1;
	else if(hw->fc == e1000_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
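/* e1000_set_pauseparam() maps the requested rx/tx pause combination onto the
 * hardware flow-control modes: both -> e1000_fc_full, rx only ->
 * e1000_fc_rx_pause, tx only -> e1000_fc_tx_pause, neither -> e1000_fc_none.
 * If pause autonegotiation is enabled the link is simply bounced so the new
 * setting is renegotiated; otherwise the MAC is forced directly.
 */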
static int
e1000_set_pauseparam(struct net_device *netdev,
                     struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->fc_autoneg = pause->autoneg;

	if(pause->rx_pause && pause->tx_pause)
		hw->fc = e1000_fc_full;
	else if(pause->rx_pause && !pause->tx_pause)
		hw->fc = e1000_fc_rx_pause;
	else if(!pause->rx_pause && pause->tx_pause)
		hw->fc = e1000_fc_tx_pause;
	else if(!pause->rx_pause && !pause->tx_pause)
		hw->fc = e1000_fc_none;

	hw->original_fc = hw->fc;

	if(adapter->fc_autoneg == AUTONEG_ENABLE) {
		if(netif_running(adapter->netdev)) {
			e1000_down(adapter);
			e1000_up(adapter);
		} else
			e1000_reset(adapter);
	} else
		return ((hw->media_type == e1000_media_type_fiber) ?
			e1000_setup_link(hw) : e1000_force_mac_fc(hw));

	return 0;
}
static uint32_t
e1000_get_rx_csum(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->rx_csum;
}
static int
e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	adapter->rx_csum = data;

	if(netif_running(netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	} else
		e1000_reset(adapter);
	return 0;
}
static uint32_t
e1000_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static int
e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if(adapter->hw.mac_type < e1000_82543) {
		if(!data)
			return -EINVAL;
		return 0;
	}

	if(data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}
#ifdef NETIF_F_TSO
static int
e1000_set_tso(struct net_device *netdev, uint32_t data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	if((adapter->hw.mac_type < e1000_82544) ||
	   (adapter->hw.mac_type == e1000_82547))
		return data ? -EINVAL : 0;

	if(data)
		netdev->features |= NETIF_F_TSO;
	else
		netdev->features &= ~NETIF_F_TSO;
	return 0;
}
#endif /* NETIF_F_TSO */
static uint32_t
e1000_get_msglevel(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}
static void
e1000_set_msglevel(struct net_device *netdev, uint32_t data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int
e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32
	return E1000_REGS_LEN * sizeof(uint32_t);
}
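/* The register dump is a fixed 32-word buffer: words 0-11 are a snapshot of
 * the main MAC control, receive and transmit registers, words 12-25 hold PHY
 * diagnostics (cable length, polarity, receiver status) gathered differently
 * for IGP and M88 PHYs, and word 26 is MANC on copper parts newer than the
 * 82540.  Unused words stay zero.
 */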
static void
e1000_get_regs(struct net_device *netdev,
	       struct ethtool_regs *regs, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint32_t *regs_buff = p;
	uint16_t phy_data;

	memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0]  = E1000_READ_REG(hw, CTRL);
	regs_buff[1]  = E1000_READ_REG(hw, STATUS);

	regs_buff[2]  = E1000_READ_REG(hw, RCTL);
	regs_buff[3]  = E1000_READ_REG(hw, RDLEN);
	regs_buff[4]  = E1000_READ_REG(hw, RDH);
	regs_buff[5]  = E1000_READ_REG(hw, RDT);
	regs_buff[6]  = E1000_READ_REG(hw, RDTR);

	regs_buff[7]  = E1000_READ_REG(hw, TCTL);
	regs_buff[8]  = E1000_READ_REG(hw, TDLEN);
	regs_buff[9]  = E1000_READ_REG(hw, TDH);
	regs_buff[10] = E1000_READ_REG(hw, TDT);
	regs_buff[11] = E1000_READ_REG(hw, TIDV);

	regs_buff[12] = adapter->hw.phy_type;  /* PHY type (IGP=1, M88=0) */
	if(hw->phy_type == e1000_phy_igp) {
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_A);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[13] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_B);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[14] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_C);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[15] = (uint32_t)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_D);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[16] = (uint32_t)phy_data; /* cable length */
		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_PCS_INIT_REG);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
		regs_buff[20] = 0; /* polarity correction enabled (always) */
		regs_buff[22] = 0; /* phy receive errors (unavailable) */
		regs_buff[23] = regs_buff[18]; /* mdix mode */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
	} else {
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (uint32_t)phy_data; /* cable length */
		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
	if(hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		regs_buff[26] = E1000_READ_REG(hw, MANC);
	}
}
static int
e1000_get_eeprom_len(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int
e1000_get_eeprom(struct net_device *netdev,
		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint16_t *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	uint16_t i;

	if(eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(uint16_t) *
			(last_word - first_word + 1), GFP_KERNEL);
	if(!eeprom_buff)
		return -ENOMEM;

	if(hw->eeprom.type == e1000_eeprom_spi)
		ret_val = e1000_read_eeprom(hw, first_word,
					    last_word - first_word + 1,
					    eeprom_buff);
	else {
		for (i = 0; i < last_word - first_word + 1; i++)
			if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
							&eeprom_buff[i])))
				break;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
static int
e1000_set_eeprom(struct net_device *netdev,
		 struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	uint16_t *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	uint16_t i;

	if(eeprom->len == 0)
		return -EOPNOTSUPP;

	if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if(!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if(eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = e1000_read_eeprom(hw, first_word, 1,
					    &eeprom_buff[0]);
		ptr++;
	}
	if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = e1000_read_eeprom(hw, last_word, 1,
				&eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_eeprom(hw, first_word,
				     last_word - first_word + 1, eeprom_buff);

	/* Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for 82573 controllers */
	if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
			      (hw->mac_type == e1000_82573)))
		e1000_update_eeprom_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}
static void
e1000_get_drvinfo(struct net_device *netdev,
		  struct ethtool_drvinfo *drvinfo)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver,  e1000_driver_name, 32);
	strncpy(drvinfo->version, e1000_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_stats = E1000_STATS_LEN;
	drvinfo->testinfo_len = E1000_TEST_LEN;
	drvinfo->regdump_len = e1000_get_regs_len(netdev);
	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
static void
e1000_get_ringparam(struct net_device *netdev,
		    struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	e1000_mac_type mac_type = adapter->hw.mac_type;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	struct e1000_rx_ring *rxdr = adapter->rx_ring;

	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
		E1000_MAX_82544_RXD;
	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
		E1000_MAX_82544_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
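/* Resizing the rings: the requested counts are clamped between the E1000_MIN
 * and E1000_MAX descriptor limits (82544 and later allow the larger maximums),
 * rounded to the required descriptor multiple, and fresh ring structures are
 * allocated.  If the interface is running, the new resources are brought up
 * before the old ones are freed so a failure can fall back to the old rings.
 */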
static int
e1000_set_ringparam(struct net_device *netdev,
		    struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	e1000_mac_type mac_type = adapter->hw.mac_type;
	struct e1000_tx_ring *txdr, *tx_old, *tx_new;
	struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
	int i, err, tx_ring_size, rx_ring_size;

	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;

	if (netif_running(adapter->netdev))
		e1000_down(adapter);

	tx_old = adapter->tx_ring;
	rx_old = adapter->rx_ring;

	adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
	if (!adapter->tx_ring) {
		err = -ENOMEM;
		goto err_setup_rx;
	}
	memset(adapter->tx_ring, 0, tx_ring_size);

	adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		err = -ENOMEM;
		goto err_setup_rx;
	}
	memset(adapter->rx_ring, 0, rx_ring_size);

	txdr = adapter->tx_ring;
	rxdr = adapter->rx_ring;

	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
		E1000_MAX_RXD : E1000_MAX_82544_RXD));
	E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);

	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
		E1000_MAX_TXD : E1000_MAX_82544_TXD));
	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);

	for (i = 0; i < adapter->num_queues; i++) {
		txdr[i].count = txdr->count;
		rxdr[i].count = rxdr->count;
	}

	if(netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
		if ((err = e1000_setup_all_rx_resources(adapter)))
			goto err_setup_rx;
		if ((err = e1000_setup_all_tx_resources(adapter)))
			goto err_setup_tx;

		/* save the new, restore the old in order to free it,
		 * then restore the new back again */

		rx_new = adapter->rx_ring;
		tx_new = adapter->tx_ring;
		adapter->rx_ring = rx_old;
		adapter->tx_ring = tx_old;
		e1000_free_all_rx_resources(adapter);
		e1000_free_all_tx_resources(adapter);
		kfree(tx_old);
		kfree(rx_old);
		adapter->rx_ring = rx_new;
		adapter->tx_ring = tx_new;
		if((err = e1000_up(adapter)))
			return err;
	}

	return 0;

err_setup_tx:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	adapter->rx_ring = rx_old;
	adapter->tx_ring = tx_old;
	if (netif_running(adapter->netdev))
		e1000_up(adapter);
	return err;
}
#define REG_PATTERN_TEST(R, M, W)                                         \
{                                                                          \
	uint32_t pat, value;                                               \
	uint32_t test[] =                                                  \
		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};          \
	for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {          \
		E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W));         \
		value = E1000_READ_REG(&adapter->hw, R);                   \
		if(value != (test[pat] & W & M)) {                         \
			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
				"0x%08X expected 0x%08X\n",                \
				E1000_##R, value, (test[pat] & W & M));    \
			*data = (adapter->hw.mac_type < e1000_82543) ?     \
				E1000_82542_##R : E1000_##R;               \
			return 1;                                          \
		}                                                          \
	}                                                                  \
}

#define REG_SET_AND_CHECK(R, M, W)                                         \
{                                                                          \
	uint32_t value;                                                    \
	E1000_WRITE_REG(&adapter->hw, R, W & M);                           \
	value = E1000_READ_REG(&adapter->hw, R);                           \
	if((W & M) != (value & M)) {                                       \
		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
			"expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
		*data = (adapter->hw.mac_type < e1000_82543) ?             \
			E1000_82542_##R : E1000_##R;                       \
		return 1;                                                  \
	}                                                                  \
}
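/* Both macros above expand inside e1000_reg_test(): REG_PATTERN_TEST writes
 * four bit patterns through the write mask W and compares the readback under
 * the read/write mask M, while REG_SET_AND_CHECK does a single write and
 * readback.  On mismatch they log the register, store its offset in *data and
 * make the enclosing test return non-zero.
 */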
static int
e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
{
	uint32_t value, before, after;
	uint32_t i, toggle;

	/* The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (adapter->hw.mac_type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
		toggle = 0x7FFFF3FF;
		break;
	case e1000_82573:
		toggle = 0x7FFFF033;
		break;
	default:
		toggle = 0xFFFFF833;
		break;
	}
	before = E1000_READ_REG(&adapter->hw, STATUS);
	value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
	E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
	after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
	if(value != after) {
		DPRINTK(DRV, ERR, "failed STATUS register test got: "
		        "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	E1000_WRITE_REG(&adapter->hw, STATUS, before);

	REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
	REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);

	if(adapter->hw.mac_type >= e1000_82543) {

		REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);

		for(i = 0; i < E1000_RAR_ENTRIES; i++) {
			REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
					 0xFFFFFFFF);
			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
					 0xFFFFFFFF);
		}

	} else {

		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);

	}

	for(i = 0; i < E1000_MC_TBL_SIZE; i++)
		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}
static int
e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
{
	uint16_t temp;
	uint16_t checksum = 0;
	uint16_t i;

	*data = 0;
	/* Read and add up the contents of the EEPROM */
	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
		if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
			*data = 1;
			break;
		}
		checksum += temp;
	}

	/* If Checksum is not Correct return error else test passed */
	if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
		*data = 2;

	return *data;
}
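/* Interrupt self-test: e1000_test_intr() is a throwaway handler that only
 * accumulates the interrupt cause register into adapter->test_icr.
 * e1000_intr_test() installs it, then for each of the first ten cause bits
 * masks the source (IMC) and forces it (ICS) expecting no interrupt (this
 * check is skipped on shared IRQ lines), unmasks it (IMS) and forces it
 * expecting one, and finally forces all other sources expecting silence.
 * Any unexpected result is reported through *data.
 */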
static irqreturn_t
e1000_test_intr(int irq,
		void *data,
		struct pt_regs *regs)
{
	struct net_device *netdev = (struct net_device *) data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR);

	return IRQ_HANDLED;
}
static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t mask, i=0, shared_int = TRUE;
	uint32_t irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
		shared_int = FALSE;
	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
			      netdev->name, netdev)){
		*data = 1;
		return -1;
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for(; i < 10; i++) {

		/* Interrupt to test */
		mask = 1 << i;

		if(!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, IMC, mask);
			E1000_WRITE_REG(&adapter->hw, ICS, mask);
			msleep(10);

			if(adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		E1000_WRITE_REG(&adapter->hw, IMS, mask);
		E1000_WRITE_REG(&adapter->hw, ICS, mask);
		msleep(10);

		if(!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if(!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
			E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
			msleep(10);

			if(adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
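/* The loopback test does not reuse the driver's normal data-path rings.  The
 * two helpers below tear down and build a small private Tx/Rx descriptor ring
 * pair (adapter->test_tx_ring / test_rx_ring), map one buffer per descriptor
 * and program the ring registers directly, so the test can run while the
 * regular interface is down.
 */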
static void
e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if(txdr->desc && txdr->buffer_info) {
		for(i = 0; i < txdr->count; i++) {
			if(txdr->buffer_info[i].dma)
				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
						 txdr->buffer_info[i].length,
						 PCI_DMA_TODEVICE);
			if(txdr->buffer_info[i].skb)
				dev_kfree_skb(txdr->buffer_info[i].skb);
		}
	}

	if(rxdr->desc && rxdr->buffer_info) {
		for(i = 0; i < rxdr->count; i++) {
			if(rxdr->buffer_info[i].dma)
				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
						 rxdr->buffer_info[i].length,
						 PCI_DMA_FROMDEVICE);
			if(rxdr->buffer_info[i].skb)
				dev_kfree_skb(rxdr->buffer_info[i].skb);
		}
	}

	if(txdr->desc)
		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
	if(rxdr->desc)
		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);

	if(txdr->buffer_info)
		kfree(txdr->buffer_info);
	if(rxdr->buffer_info)
		kfree(rxdr->buffer_info);

	return;
}
static int
e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	uint32_t rctl;
	int size, i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if(!txdr->count)
		txdr->count = E1000_DEFAULT_TXD;

	size = txdr->count * sizeof(struct e1000_buffer);
	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 1;
		goto err_nomem;
	}
	memset(txdr->buffer_info, 0, size);

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);
	if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	memset(txdr->desc, 0, txdr->size);
	txdr->next_to_use = txdr->next_to_clean = 0;

	E1000_WRITE_REG(&adapter->hw, TDBAL,
			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			txdr->count * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
	E1000_WRITE_REG(&adapter->hw, TCTL,
			E1000_TCTL_PSP | E1000_TCTL_EN |
			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
			E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for(i = 0; i < txdr->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		if(!(skb = alloc_skb(size, GFP_KERNEL))) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		txdr->buffer_info[i].skb = skb;
		txdr->buffer_info[i].length = skb->len;
		txdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if(!rxdr->count)
		rxdr->count = E1000_DEFAULT_RXD;

	size = rxdr->count * sizeof(struct e1000_buffer);
	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 4;
		goto err_nomem;
	}
	memset(rxdr->buffer_info, 0, size);

	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	memset(rxdr->desc, 0, rxdr->size);
	rxdr->next_to_use = rxdr->next_to_clean = 0;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_REG(&adapter->hw, RDBAL,
			((uint64_t) rxdr->dma & 0xFFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);

	for(i = 0; i < rxdr->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
		struct sk_buff *skb;

		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
				     GFP_KERNEL))) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rxdr->buffer_info[i].skb = skb;
		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
		rxdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
				       PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
static void
e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
}
static void
e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
	uint16_t phy_reg;

	/* Because we reset the PHY above, we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock.  This
	 * value defaults back to a 2.5MHz clock when the PHY is reset.
	 */
	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
	e1000_write_phy_reg(&adapter->hw,
			    M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);

	/* In addition, because of the s/w reset above, we need to enable
	 * CRS on TX.  This must be set for both full and half duplex
	 * operation.
	 */
	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	e1000_write_phy_reg(&adapter->hw,
			    M88E1000_PHY_SPEC_CTRL, phy_reg);
}
static int
e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
	uint32_t ctrl_reg;
	uint16_t phy_reg;

	/* Setup the Device Control Register for PHY loopback test. */

	ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);

	/* Read the PHY Specific Control Register (0x10) */
	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);

	/* Clear Auto-Crossover bits in PHY Specific Control Register
	 * (bits 6:5).
	 */

	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);

	/* Perform software reset on the PHY */
	e1000_phy_reset(&adapter->hw);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100);

	/* Wait for reset to complete. */
	udelay(500);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_phy_disable_receiver(adapter);

	/* Set the loopback bit in the PHY control register. */
	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
	phy_reg |= MII_CR_LOOPBACK;
	e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);

	/* Setup TX_CLK and TX_CRS one more time. */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Check Phy Configuration */
	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
	if(phy_reg != 0x4100)
		return 9;

	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	if(phy_reg != 0x0070)
		return 10;

	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
	if(phy_reg != 0x001A)
		return 11;

	return 0;
}
static int
e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	uint32_t ctrl_reg = 0;
	uint32_t stat_reg = 0;

	adapter->hw.autoneg = FALSE;

	if(adapter->hw.phy_type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		e1000_write_phy_reg(&adapter->hw,
				    M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
		/* autoneg off */
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
	}
	/* force 1000, set loopback */
	e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD);	 /* Force Duplex to FULL */

	if(adapter->hw.media_type == e1000_media_type_copper &&
	   adapter->hw.phy_type == e1000_phy_m88) {
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
	} else {
		/* Set the ILOS bit on the fiber Nic if half
		 * duplex link is detected. */
		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
		if((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if(adapter->hw.phy_type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}
static int
e1000_set_phy_loopback(struct e1000_adapter *adapter)
{
	uint16_t phy_reg = 0;
	uint16_t count = 0;

	switch (adapter->hw.mac_type) {
	case e1000_82543:
		if(adapter->hw.media_type == e1000_media_type_copper) {
			/* Attempt to setup Loopback mode on Non-integrated PHY.
			 * Some PHY registers get corrupted at random, so
			 * attempt this 10 times.
			 */
			while(e1000_nonintegrated_phy_loopback(adapter) &&
			      count++ < 10);
			if(count < 11)
				return 0;
		}
		break;

	case e1000_82544:
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82541:
	case e1000_82541_rev_2:
	case e1000_82547:
	case e1000_82547_rev_2:
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		return e1000_integrated_phy_loopback(adapter);
		break;

	default:
		/* Default PHY loopback work is to read the MII
		 * control register and assert bit 14 (loopback mode).
		 */
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
		phy_reg |= MII_CR_LOOPBACK;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
		return 0;
		break;
	}

	return 8;
}
static int
e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
	uint32_t rctl;

	if(adapter->hw.media_type == e1000_media_type_fiber ||
	   adapter->hw.media_type == e1000_media_type_internal_serdes) {
		if(adapter->hw.mac_type == e1000_82545 ||
		   adapter->hw.mac_type == e1000_82546 ||
		   adapter->hw.mac_type == e1000_82545_rev_3 ||
		   adapter->hw.mac_type == e1000_82546_rev_3)
			return e1000_set_phy_loopback(adapter);
		else {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_LBM_TCVR;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			return 0;
		}
	} else if(adapter->hw.media_type == e1000_media_type_copper)
		return e1000_set_phy_loopback(adapter);

	return 7;
}
static void
e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	uint32_t rctl;
	uint16_t phy_reg;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);

	if(adapter->hw.media_type == e1000_media_type_copper ||
	   ((adapter->hw.media_type == e1000_media_type_fiber ||
	     adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	    (adapter->hw.mac_type == e1000_82545 ||
	     adapter->hw.mac_type == e1000_82546 ||
	     adapter->hw.mac_type == e1000_82545_rev_3 ||
	     adapter->hw.mac_type == e1000_82546_rev_3))) {
		adapter->hw.autoneg = TRUE;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
		if(phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
			e1000_phy_reset(&adapter->hw);
		}
	}
}
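/* Loopback frames are built with a recognisable pattern: the buffer is filled
 * with 0xFF, the second half is then overwritten with 0xAA, and single 0xBE
 * and 0xAF marker bytes are placed 10 and 12 bytes past the midpoint.
 * e1000_check_lbtest_frame() only needs to verify a leading 0xFF byte and the
 * two markers to decide the frame made the round trip intact.
 */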
static void
e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
static int
e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
	if(*(skb->data + 3) == 0xFF) {
		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
			return 0;
		}
	}
	return 13;
}
static int
e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val=0;
	unsigned long time;

	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

	/* Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if(rxdr->count <= txdr->count)
		lc = ((txdr->count / 64) * 2) + 1;
	else
		lc = ((rxdr->count / 64) * 2) + 1;

	k = l = 0;
	for(j = 0; j <= lc; j++) { /* loop count loop */
		for(i = 0; i < 64; i++) { /* send the packets */
			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
						  1024);
			pci_dma_sync_single_for_device(pdev,
					txdr->buffer_info[k].dma,
					txdr->buffer_info[k].length,
					PCI_DMA_TODEVICE);
			if(unlikely(++k == txdr->count)) k = 0;
		}
		E1000_WRITE_REG(&adapter->hw, TDT, k);
		msleep(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			pci_dma_sync_single_for_cpu(pdev,
					rxdr->buffer_info[l].dma,
					rxdr->buffer_info[l].length,
					PCI_DMA_FROMDEVICE);

			ret_val = e1000_check_lbtest_frame(
					rxdr->buffer_info[l].skb,
					1024);
			if(!ret_val)
				good_cnt++;
			if(unlikely(++l == rxdr->count)) l = 0;
			/* time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 */
		} while (good_cnt < 64 && jiffies < (time + 20));
		if(good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break;
		}
		if(jiffies >= (time + 2)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}
static int
e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
{
	if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
	if((*data = e1000_setup_loopback_test(adapter))) goto err_loopback;
	*data = e1000_run_loopback_test(adapter);
	e1000_loopback_cleanup(adapter);
	e1000_free_desc_rings(adapter);
err_loopback:
	return *data;
}
static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
	*data = 0;
	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
		int i = 0;
		adapter->hw.serdes_link_down = TRUE;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes */
		do {
			e1000_check_for_link(&adapter->hw);
			if (adapter->hw.serdes_link_down == FALSE)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		e1000_check_for_link(&adapter->hw);
		if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
			msleep(4000);

		if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
			*data = 1;
		}
	}
	return *data;
}
static int
e1000_diag_test_count(struct net_device *netdev)
{
	return E1000_TEST_LEN;
}
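/* ETH_TEST_FL_OFFLINE ("ethtool -t ethX offline") saves the speed, duplex and
 * autoneg configuration, takes the interface down, runs the register, EEPROM,
 * interrupt and loopback tests with a reset between each, restores the saved
 * settings and brings the interface back up.  The online variant only runs
 * the link test and reports the offline slots as passing.
 */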
static void
e1000_diag_test(struct net_device *netdev,
		struct ethtool_test *eth_test, uint64_t *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	boolean_t if_running = netif_running(netdev);

	if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
		uint8_t autoneg = adapter->hw.autoneg;

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if(e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if(if_running)
			e1000_down(adapter);
		else
			e1000_reset(adapter);

		if(e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if(e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if(e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if(e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.autoneg_advertised = autoneg_advertised;
		adapter->hw.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.autoneg = autoneg;

		e1000_reset(adapter);
		if(if_running)
			e1000_up(adapter);
	} else {
		/* Online tests */
		if(e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;
	}
	msleep_interruptible(4 * 1000);
}
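/* Wake-on-LAN support is keyed off the PCI device ID: the oldest parts
 * (82542/82543/82544 and a few 82545/82546 variants) advertise no wake
 * capability, dual-port fiber 82546 boards only support wake events on
 * port A (checked via STATUS_FUNC_1), and everything else exposes unicast,
 * multicast, broadcast and magic-packet wake filters.
 */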
static void
e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch(adapter->hw.device_id) {
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
		wol->supported = 0;
		wol->wolopts   = 0;
		return;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			wol->wolopts   = 0;
			return;
		}
		/* Fall Through */

	default:
		wol->supported = WAKE_UCAST | WAKE_MCAST |
				 WAKE_BCAST | WAKE_MAGIC;

		wol->wolopts = 0;
		if(adapter->wol & E1000_WUFC_EX)
			wol->wolopts |= WAKE_UCAST;
		if(adapter->wol & E1000_WUFC_MC)
			wol->wolopts |= WAKE_MCAST;
		if(adapter->wol & E1000_WUFC_BC)
			wol->wolopts |= WAKE_BCAST;
		if(adapter->wol & E1000_WUFC_MAG)
			wol->wolopts |= WAKE_MAGIC;
		return;
	}
}
static int
e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch(adapter->hw.device_id) {
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
		return wol->wolopts ? -EOPNOTSUPP : 0;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
			return wol->wolopts ? -EOPNOTSUPP : 0;
		/* Fall Through */

	default:
		if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
			return -EOPNOTSUPP;

		adapter->wol = 0;

		if(wol->wolopts & WAKE_UCAST)
			adapter->wol |= E1000_WUFC_EX;
		if(wol->wolopts & WAKE_MCAST)
			adapter->wol |= E1000_WUFC_MC;
		if(wol->wolopts & WAKE_BCAST)
			adapter->wol |= E1000_WUFC_BC;
		if(wol->wolopts & WAKE_MAGIC)
			adapter->wol |= E1000_WUFC_MAG;
	}

	return 0;
}
/* toggle LED 4 times per second = 2 "blinks" per second */
#define E1000_ID_INTERVAL	(HZ/4)

/* bit defines for adapter->led_status */
#define E1000_LED_ON		0
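/* "ethtool -p ethX N" identify support: on pre-82571 parts a kernel timer
 * toggles the LED through e1000_led_on()/e1000_led_off() every
 * E1000_ID_INTERVAL jiffies for N seconds; newer parts are given a blink
 * pattern to run in hardware by writing LEDCTL once.  Either way the LED is
 * switched off and the LED logic cleaned up when the sleep finishes.
 */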
static void
e1000_led_blink_callback(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
		e1000_led_off(&adapter->hw);
	else
		e1000_led_on(&adapter->hw);

	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
}
static int
e1000_phys_id(struct net_device *netdev, uint32_t data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);

	if(adapter->hw.mac_type < e1000_82571) {
		if(!adapter->blink_timer.function) {
			init_timer(&adapter->blink_timer);
			adapter->blink_timer.function = e1000_led_blink_callback;
			adapter->blink_timer.data = (unsigned long) adapter;
		}
		e1000_setup_led(&adapter->hw);
		mod_timer(&adapter->blink_timer, jiffies);
		msleep_interruptible(data * 1000);
		del_timer_sync(&adapter->blink_timer);
	} else {
		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
			E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
		msleep_interruptible(data * 1000);
	}

	e1000_led_off(&adapter->hw);
	clear_bit(E1000_LED_ON, &adapter->led_status);
	e1000_cleanup_led(&adapter->hw);

	return 0;
}
static int
e1000_nway_reset(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	if(netif_running(netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}
	return 0;
}
static int
e1000_get_stats_count(struct net_device *netdev)
{
	return E1000_STATS_LEN;
}
static void
e1000_get_ethtool_stats(struct net_device *netdev,
			struct ethtool_stats *stats, uint64_t *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int i;

	e1000_update_stats(adapter);
	for(i = 0; i < E1000_STATS_LEN; i++) {
		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}
static void
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
	int i;

	switch(stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e1000_gstrings_test,
		       E1000_TEST_LEN*ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i=0; i < E1000_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       e1000_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
= {
1746 .get_settings
= e1000_get_settings
,
1747 .set_settings
= e1000_set_settings
,
1748 .get_drvinfo
= e1000_get_drvinfo
,
1749 .get_regs_len
= e1000_get_regs_len
,
1750 .get_regs
= e1000_get_regs
,
1751 .get_wol
= e1000_get_wol
,
1752 .set_wol
= e1000_set_wol
,
1753 .get_msglevel
= e1000_get_msglevel
,
1754 .set_msglevel
= e1000_set_msglevel
,
1755 .nway_reset
= e1000_nway_reset
,
1756 .get_link
= ethtool_op_get_link
,
1757 .get_eeprom_len
= e1000_get_eeprom_len
,
1758 .get_eeprom
= e1000_get_eeprom
,
1759 .set_eeprom
= e1000_set_eeprom
,
1760 .get_ringparam
= e1000_get_ringparam
,
1761 .set_ringparam
= e1000_set_ringparam
,
1762 .get_pauseparam
= e1000_get_pauseparam
,
1763 .set_pauseparam
= e1000_set_pauseparam
,
1764 .get_rx_csum
= e1000_get_rx_csum
,
1765 .set_rx_csum
= e1000_set_rx_csum
,
1766 .get_tx_csum
= e1000_get_tx_csum
,
1767 .set_tx_csum
= e1000_set_tx_csum
,
1768 .get_sg
= ethtool_op_get_sg
,
1769 .set_sg
= ethtool_op_set_sg
,
1771 .get_tso
= ethtool_op_get_tso
,
1772 .set_tso
= e1000_set_tso
,
1774 .self_test_count
= e1000_diag_test_count
,
1775 .self_test
= e1000_diag_test
,
1776 .get_strings
= e1000_get_strings
,
1777 .phys_id
= e1000_phys_id
,
1778 .get_stats_count
= e1000_get_stats_count
,
1779 .get_ethtool_stats
= e1000_get_ethtool_stats
,
1780 .get_perm_addr
= ethtool_op_get_perm_addr
,
1783 void e1000_set_ethtool_ops(struct net_device
*netdev
)
1785 SET_ETHTOOL_OPS(netdev
, &e1000_ethtool_ops
);