/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}

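/* Summary of the arbitration above (descriptive note, not normative hardware
 * documentation): SWSM.SMBI arbitrates access between software agents, while
 * SWSM.SWESMBI arbitrates between software and firmware.  Both bits must be
 * owned before the NVM or PHY is touched, and every successful call to
 * igb_get_hw_semaphore_i210() must be balanced by igb_put_hw_semaphore().
 */
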
/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

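/* Typical use of the SW/FW sync pair (illustrative sketch only; the real
 * callers live elsewhere in the igb driver, and E1000_SWFW_PHY0_SM is just
 * an example mask):
 *
 *	if (igb_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM))
 *		return -E1000_ERR_SWFW_SYNC;
 *	... access the PHY or NVM ...
 *	igb_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
 */
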
/**
 *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			   u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

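/* Worked example of the burst arithmetic above, assuming the usual
 * E1000_EERD_EEWR_MAX_COUNT of 512 words from e1000_i210.h: for a 1000-word
 * request the loop runs twice, with count = 512 on the first pass
 * (i = 0, (1000 - 0) / 512 > 0) and count = 488 on the second
 * (i = 512, (1000 - 512) / 512 == 0).  The semaphore is dropped between
 * bursts so the firmware's forceful takeover procedure is never starved.
 */
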
/**
 *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If igb_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	/* A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			udelay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
 *  partially written.
 **/
s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			    u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  igb_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

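/* Each 32-bit iNVM (OTP) dword scanned above is a self-describing record:
 * the INVM_DWORD_TO_RECORD_TYPE/WORD_ADDRESS/WORD_DATA macros in
 * e1000_i210.h extract the record type, the emulated EEPROM word address and
 * the 16-bit payload.  Structured records (CSR autoload, RSA key) are skipped
 * by advancing the scan index past their fixed data size.
 */
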
/**
 *  igb_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @words: number of words to read
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 words __always_unused, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
						   &data[2]);
		if (ret_val != E1000_SUCCESS)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver) {
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}

/**
 *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/* Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

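/* Checksum arithmetic used above (illustrative sketch): the words up to and
 * including the checksum word at NVM_CHECKSUM_REG must sum to NVM_SUM
 * (0xBABA, per the comment on igb_validate_nvm_checksum_i210).  So if the
 * words before the checksum word sum to 0x1234, the value written to the
 * checksum word is 0xBABA - 0x1234 = 0xA886, and a later validate pass sums
 * everything back to 0xBABA.
 */
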
/**
 *  igb_pool_flash_update_done_i210 - Pool FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 *  igb_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 *  igb_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		hw_dbg("Flash update complete\n");
	else
		hw_dbg("Flash update time out\n");

out:
	return ret_val;
}

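/* The two igb_pool_flash_update_done_i210() calls above form a simple
 * handshake: first wait for any in-progress flash cycle to finish (FLUDONE
 * set in EECD), then request a new dump of the shadow RAM to flash by
 * setting FLUPD, and finally poll FLUDONE again until the hardware reports
 * that the commit completed.
 */
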
/**
 *  igb_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __igb_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = E1000_SUCCESS;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
							 dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

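/* The sequence above is the usual indirect MMD access dance: program the
 * target device address into MMDAC, the register address into MMDAAD, switch
 * MMDAC to data mode (E1000_MMDAC_FUNC_DATA), then read or write the data
 * through MMDAAD, and finally park MMDAC back at 0.
 */
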
/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}

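/* Illustrative use of the wrappers above (hypothetical device and register
 * numbers, shown only to make the calling convention concrete):
 *
 *	u16 val;
 *
 *	if (!igb_read_xmdio_reg(hw, 0x3D, 7, &val))	- MMD 7, register 0x3D
 *		igb_write_xmdio_reg(hw, 0x3D, 7, val | 0x1);
 */
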
/**
 *  igb_init_nvm_params_i210 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}

/**
 * igb_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					 E1000_PHY_PLL_FREQ_REG), &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
);