/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
#define IXGBE_X540_RAR_ENTRIES   128
#define IXGBE_X540_MC_TBL_SIZE   128
#define IXGBE_X540_VFT_TBL_SIZE  128

static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);

static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
        return ixgbe_media_type_copper;
}

static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
        struct ixgbe_mac_info *mac = &hw->mac;

        /* Call PHY identify routine to get the phy type */
        ixgbe_identify_phy_generic(hw);

        mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
        mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
        mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

        return 0;
}

/**
 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 **/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
        return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
                                            autoneg_wait_to_complete);
}

/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
        ixgbe_link_speed link_speed;
        s32 status = 0;
        u32 ctrl;
        u32 ctrl_ext;
        u32 reset_bit;
        u32 i;
        u32 autoc;
        u32 autoc2;
        bool link_up = false;

        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);

        /*
         * Prevent the PCI-E bus from hanging by disabling PCI-E master
         * access and verify no pending requests before reset
         */
        ixgbe_disable_pcie_master(hw);

mac_reset_top:
        /*
         * Issue global reset to the MAC.  Needs to be SW reset if link is up.
         * If link reset is used when link is up, it might reset the PHY when
         * mng is using it.  If link is down or the flag to force full link
         * reset is set, then perform link reset.
         */
        if (hw->force_full_reset) {
                reset_bit = IXGBE_CTRL_LNK_RST;
        } else {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
                if (!link_up)
                        reset_bit = IXGBE_CTRL_LNK_RST;
                else
                        reset_bit = IXGBE_CTRL_RST;
        }

        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
        IXGBE_WRITE_FLUSH(hw);

        /* Poll for reset bit to self-clear indicating reset is complete */
        for (i = 0; i < 10; i++) {
                udelay(1);
                ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
                if (!(ctrl & reset_bit))
                        break;
        }
        if (ctrl & reset_bit) {
                status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "Reset polling failed to complete.\n");
        }

        /*
         * Double resets are required for recovery from certain error
         * conditions.  Between resets, it is necessary to stall to allow time
         * for any pending HW events to complete.  We use 1usec since that is
         * what is needed for ixgbe_disable_pcie_master().  The second reset
         * then clears out any effects of those events.
         */
        if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
                hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
                udelay(1);
                goto mac_reset_top;
        }

        /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        /* Set the Rx packet buffer size. */
        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /*
         * Store the original AUTOC/AUTOC2 values if they have not been
         * stored off yet.  Otherwise restore the stored original
         * values since the reset operation sets back to defaults.
         */
        autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
        if (hw->mac.orig_link_settings_stored == false) {
                hw->mac.orig_autoc = autoc;
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = true;
        } else {
                if (autoc != hw->mac.orig_autoc)
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
                                        IXGBE_AUTOC_AN_RESTART));

                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
                        autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
                        autoc2 |= (hw->mac.orig_autoc2 &
                                   IXGBE_AUTOC2_UPPER_MASK);
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
                }
        }

        /*
         * Store MAC address from RAR0, clear receive address registers, and
         * clear the multicast table.  Also reset num_rar_entries to 128,
         * since we modify this value when programming the SAN MAC address.
         */
        hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
        hw->mac.ops.init_rx_addrs(hw);

        /* Store the permanent mac address */
        hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

        /* Store the permanent SAN mac address */
        hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
                hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
                                    hw->mac.san_addr, 0, IXGBE_RAH_AV);

                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
        }

        /* Store the alternative WWNN/WWPN prefix */
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
                                   &hw->mac.wwpn_prefix);

        return status;
}

/**
 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
        u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
        u16 ext_ability = 0;

        hw->phy.ops.identify(hw);
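
        /*
         * The PMA/PMD extended-ability register reports which BASE-T rates
         * the PHY supports; translate those bits into ixgbe physical-layer
         * flags.
         */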
        hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
                             &ext_ability);
        if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
                physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
        if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
                physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;

        return physical_layer;
}

/**
 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
        u32 eec;
        u16 eeprom_size;

        if (eeprom->type == ixgbe_eeprom_uninitialized) {
                eeprom->semaphore_delay = 10;
                eeprom->type = ixgbe_flash;
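
                /*
                 * The EEC register's size field encodes the NVM size as a
                 * power of two; expand it into a word count.
                 */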
                eec = IXGBE_READ_REG(hw, IXGBE_EEC);
                eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
                                    IXGBE_EEC_SIZE_SHIFT);
                eeprom->word_size = 1 << (eeprom_size +
                                          IXGBE_EEPROM_WORD_SIZE_SHIFT);

                hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
                       eeprom->type, eeprom->word_size);
        }

        return 0;
}

/**
 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
        s32 status;

        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
                status = ixgbe_read_eerd_generic(hw, offset, data);
        else
                status = IXGBE_ERR_SWFW_SYNC;

        ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);

        return status;
}

/**
 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word to write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
        u32 eewr;
        s32 status;

        hw->eeprom.ops.init_params(hw);

        if (offset >= hw->eeprom.word_size) {
                status = IXGBE_ERR_EEPROM;
                goto out;
        }
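
        /*
         * Build the EEWR command word: the EEPROM word offset goes into the
         * address field, the 16-bit data into the data field, and the START
         * bit tells the hardware to begin the write.
         */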
        eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
               (data << IXGBE_EEPROM_RW_REG_DATA) |
               IXGBE_EEPROM_RW_REG_START;

        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status != 0) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
                        goto out;
                }

                IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
                if (status != 0) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
                        goto out;
                }
        } else {
                status = IXGBE_ERR_SWFW_SYNC;
        }

out:
        ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
        return status;
}

/**
 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 **/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
        u16 i;
        u16 j;
        u16 checksum = 0;
        u16 length = 0;
        u16 pointer = 0;
        u16 word = 0;

        /* Include 0x0-0x3F in the checksum */
        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
                if (hw->eeprom.ops.read(hw, i, &word) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }
                checksum += word;
        }

        /*
         * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
         * FW, PHY module, and PCIe Expansion/Option ROM pointers.
         */
        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
                if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
                        continue;

                if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }

                /* Skip pointer section if the pointer is invalid. */
                if (pointer == 0xFFFF || pointer == 0 ||
                    pointer >= hw->eeprom.word_size)
                        continue;

                if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
                        hw_dbg(hw, "EEPROM read failed\n");
                        break;
                }

                /* Skip pointer section if length is invalid. */
                if (length == 0xFFFF || length == 0 ||
                    (pointer + length) >= hw->eeprom.word_size)
                        continue;
                for (j = pointer + 1; j <= pointer + length; j++) {
                        if (hw->eeprom.ops.read(hw, j, &word) != 0) {
                                hw_dbg(hw, "EEPROM read failed\n");
                                break;
                        }
                        checksum += word;
                }
        }

        checksum = (u16)IXGBE_EEPROM_SUM - checksum;

        return checksum;
}

/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing EEPROM to shadow RAM using EEWR register, software calculates
 * checksum and updates the EEPROM and instructs the hardware to update
 * the flash.
 **/
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
        s32 status;

        status = ixgbe_update_eeprom_checksum_generic(hw);

        if (status == 0)
                status = ixgbe_update_flash_X540(hw);

        return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
 * EEPROM from shadow RAM to the flash device.
 **/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
        u32 flup;
        s32 status = IXGBE_ERR_EEPROM;

        status = ixgbe_poll_flash_update_done_X540(hw);
        if (status == IXGBE_ERR_EEPROM) {
                hw_dbg(hw, "Flash update time out\n");
                goto out;
        }

        flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
        IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);

        status = ixgbe_poll_flash_update_done_X540(hw);
        if (status == 0)
                hw_dbg(hw, "Flash update complete\n");
        else
                hw_dbg(hw, "Flash update time out\n");
        if (hw->revision_id == 0) {
                flup = IXGBE_READ_REG(hw, IXGBE_EEC);

                if (flup & IXGBE_EEC_SEC1VAL) {
                        flup |= IXGBE_EEC_FLUP;
                        IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
                }

                status = ixgbe_poll_flash_update_done_X540(hw);
                if (status == 0)
                        hw_dbg(hw, "Flash update complete\n");
                else
                        hw_dbg(hw, "Flash update time out\n");
        }
out:
        return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
        u32 i;
        u32 reg;
        s32 status = IXGBE_ERR_EEPROM;

        for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
                reg = IXGBE_READ_REG(hw, IXGBE_EEC);
                if (reg & IXGBE_EEC_FLUDONE) {
                        status = 0;
                        break;
                }
                udelay(5);
        }
        return status;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;
        u32 fwmask = mask << 5;
        u32 hwmask = 0;
        u32 timeout = 200;
        u32 i;
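
        /*
         * In SW_FW_SYNC the firmware's ownership bits sit five positions
         * above the corresponding software bits, which is why the FW mask is
         * the SW mask shifted left by five.
         */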
        if (swmask == IXGBE_GSSR_EEP_SM)
                hwmask = IXGBE_GSSR_FLASH_SM;

        for (i = 0; i < timeout; i++) {
                /*
                 * SW NVM semaphore bit is used for access to all
                 * SW_FW_SYNC bits (not just NVM)
                 */
                if (ixgbe_get_swfw_sync_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;

                swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                if (!(swfw_sync & (fwmask | swmask | hwmask))) {
                        swfw_sync |= swmask;
                        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
                        ixgbe_release_swfw_sync_semaphore(hw);
                        break;
                } else {
                        /*
                         * Firmware currently using resource (fwmask),
                         * hardware currently using resource (hwmask),
                         * or other software thread currently using
                         * resource (swmask)
                         */
                        ixgbe_release_swfw_sync_semaphore(hw);
                        usleep_range(5000, 10000);
                }
        }

        /*
         * If the resource is not released by the FW/HW the SW can assume that
         * the FW/HW malfunctions.  In that case the SW should set the
         * SW bit(s) of the requested resource(s) while ignoring the
         * corresponding FW/HW bits in the SW_FW_SYNC register.
         */
        swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        if (swfw_sync & (fwmask | hwmask)) {
                if (ixgbe_get_swfw_sync_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;

                swfw_sync |= swmask;
                IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
                ixgbe_release_swfw_sync_semaphore(hw);
        }

        return 0;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;

        ixgbe_get_swfw_sync_semaphore(hw);

        swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        swfw_sync &= ~swmask;
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);

        ixgbe_release_swfw_sync_semaphore(hw);
}

/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 **/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
        s32 status = IXGBE_ERR_EEPROM;
        u32 timeout = 2000;
        u32 i;
        u32 swsm;

        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
                /*
                 * If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
                if (!(swsm & IXGBE_SWSM_SMBI)) {
                        status = 0;
                        break;
                }
                udelay(50);
        }

        /* Now get the semaphore between SW/FW through the REGSMP bit */
        if (!status) {
                for (i = 0; i < timeout; i++) {
                        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
                        if (!(swsm & IXGBE_SWFW_REGSMP))
                                break;

                        udelay(50);
                }
        } else {
                hw_dbg(hw, "Software semaphore SMBI between device drivers "
                           "not granted.\n");
        }

        return status;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
        u32 swsm;

        /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

        swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
        swsm &= ~IXGBE_SWSM_SMBI;
        IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        swsm &= ~IXGBE_SWFW_REGSMP;
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);

        IXGBE_WRITE_FLUSH(hw);
}

static struct ixgbe_mac_operations mac_ops_X540 = {
        .init_hw                = &ixgbe_init_hw_generic,
        .reset_hw               = &ixgbe_reset_hw_X540,
        .start_hw               = &ixgbe_start_hw_generic,
        .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type         = &ixgbe_get_media_type_X540,
        .get_supported_physical_layer =
                                  &ixgbe_get_supported_physical_layer_X540,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
        .get_device_caps        = NULL,
        .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
        .stop_adapter           = &ixgbe_stop_adapter_generic,
        .get_bus_info           = &ixgbe_get_bus_info_generic,
        .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
        .read_analog_reg8       = NULL,
        .write_analog_reg8      = NULL,
        .setup_link             = &ixgbe_setup_mac_link_X540,
        .check_link             = &ixgbe_check_mac_link_generic,
        .get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
        .led_on                 = &ixgbe_led_on_generic,
        .led_off                = &ixgbe_led_off_generic,
        .blink_led_start        = &ixgbe_blink_led_start_generic,
        .blink_led_stop         = &ixgbe_blink_led_stop_generic,
        .set_rar                = &ixgbe_set_rar_generic,
        .clear_rar              = &ixgbe_clear_rar_generic,
        .set_vmdq               = &ixgbe_set_vmdq_generic,
        .clear_vmdq             = &ixgbe_clear_vmdq_generic,
        .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
        .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
        .enable_mc              = &ixgbe_enable_mc_generic,
        .disable_mc             = &ixgbe_disable_mc_generic,
        .clear_vfta             = &ixgbe_clear_vfta_generic,
        .set_vfta               = &ixgbe_set_vfta_generic,
        .fc_enable              = &ixgbe_fc_enable_generic,
        .init_uta_tables        = &ixgbe_init_uta_tables_generic,
        .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
        .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
        .acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
        .release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
};

static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
        .init_params            = &ixgbe_init_eeprom_params_X540,
        .read                   = &ixgbe_read_eerd_X540,
        .write                  = &ixgbe_write_eewr_X540,
        .calc_checksum          = &ixgbe_calc_eeprom_checksum_X540,
        .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
        .update_checksum        = &ixgbe_update_eeprom_checksum_X540,
};

static struct ixgbe_phy_operations phy_ops_X540 = {
        .identify               = &ixgbe_identify_phy_generic,
        .identify_sfp           = &ixgbe_identify_sfp_module_generic,
        .read_reg               = &ixgbe_read_phy_reg_generic,
        .write_reg              = &ixgbe_write_phy_reg_generic,
        .setup_link             = &ixgbe_setup_phy_link_generic,
        .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
        .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
        .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
        .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
        .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
        .check_overtemp         = &ixgbe_tn_check_overtemp,
};

struct ixgbe_info ixgbe_X540_info = {
        .mac                    = ixgbe_mac_X540,
        .get_invariants         = &ixgbe_get_invariants_X540,
        .mac_ops                = &mac_ops_X540,
        .eeprom_ops             = &eeprom_ops_X540,
        .phy_ops                = &phy_ops_X540,
        .mbx_ops                = &mbx_ops_generic,
};