1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection
34 * 82562V 10/100 Network Connection
35 * 82562V-2 10/100 Network Connection
36 * 82566DC-2 Gigabit Network Connection
37 * 82566DC Gigabit Network Connection
38 * 82566DM-2 Gigabit Network Connection
39 * 82566DM Gigabit Network Connection
40 * 82566MC Gigabit Network Connection
41 * 82566MM Gigabit Network Connection
42 * 82567LM Gigabit Network Connection
43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
45 * 82567LM-2 Gigabit Network Connection
46 * 82567LF-2 Gigabit Network Connection
47 * 82567V-2 Gigabit Network Connection
48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
61 #define ICH_FLASH_GFPREG 0x0000
62 #define ICH_FLASH_HSFSTS 0x0004
63 #define ICH_FLASH_HSFCTL 0x0006
64 #define ICH_FLASH_FADDR 0x0008
65 #define ICH_FLASH_FDATA0 0x0010
66 #define ICH_FLASH_PR0 0x0074
68 #define ICH_FLASH_READ_COMMAND_TIMEOUT 500
69 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
70 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
71 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
72 #define ICH_FLASH_CYCLE_REPEAT_COUNT 10
74 #define ICH_CYCLE_READ 0
75 #define ICH_CYCLE_WRITE 2
76 #define ICH_CYCLE_ERASE 3
78 #define FLASH_GFPREG_BASE_MASK 0x1FFF
79 #define FLASH_SECTOR_ADDR_SHIFT 12
81 #define ICH_FLASH_SEG_SIZE_256 256
82 #define ICH_FLASH_SEG_SIZE_4K 4096
83 #define ICH_FLASH_SEG_SIZE_8K 8192
84 #define ICH_FLASH_SEG_SIZE_64K 65536
87 #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
88 /* FW established a valid mode */
89 #define E1000_ICH_FWSM_FW_VALID 0x00008000
91 #define E1000_ICH_MNG_IAMT_MODE 0x2
93 #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
94 (ID_LED_DEF1_OFF2 << 8) | \
95 (ID_LED_DEF1_ON2 << 4) | \
98 #define E1000_ICH_NVM_SIG_WORD 0x13
99 #define E1000_ICH_NVM_SIG_MASK 0xC000
100 #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
101 #define E1000_ICH_NVM_SIG_VALUE 0x80
103 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500
105 #define E1000_FEXTNVM_SW_CONFIG 1
106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
108 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
109 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
111 #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
112 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
113 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
115 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
117 #define E1000_ICH_RAR_ENTRIES 7
118 #define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
119 #define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
121 #define PHY_PAGE_SHIFT 5
122 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
123 ((reg) & MAX_PHY_REG_ADDRESS))
124 #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
125 #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
127 #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
128 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
129 #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
131 #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
133 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
135 /* SMBus Control Phy Register */
136 #define CV_SMB_CTRL PHY_REG(769, 23)
137 #define CV_SMB_CTRL_FORCE_SMBUS 0x0001
139 /* SMBus Address Phy Register */
140 #define HV_SMB_ADDR PHY_REG(768, 26)
141 #define HV_SMB_ADDR_MASK 0x007F
142 #define HV_SMB_ADDR_PEC_EN 0x0200
143 #define HV_SMB_ADDR_VALID 0x0080
144 #define HV_SMB_ADDR_FREQ_MASK 0x1100
145 #define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
146 #define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
148 /* PHY Power Management Control */
149 #define HV_PM_CTRL PHY_REG(770, 17)
150 #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
152 /* PHY Low Power Idle Control */
153 #define I82579_LPI_CTRL PHY_REG(772, 20)
154 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000
155 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
158 #define I82579_EMI_ADDR 0x10
159 #define I82579_EMI_DATA 0x11
160 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
161 #define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
162 #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
163 #define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
164 #define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
165 #define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
167 /* Intel Rapid Start Technology Support */
168 #define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
169 #define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170 #define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171 #define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
172 #define I217_CGFREG PHY_REG(772, 29)
173 #define I217_CGFREG_ENABLE_MTA_RESET 0x0002
174 #define I217_MEMPWR PHY_REG(772, 26)
175 #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
177 /* Strapping Option Register - RO */
178 #define E1000_STRAP 0x0000C
179 #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
180 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
181 #define E1000_STRAP_SMT_FREQ_MASK 0x00003000
182 #define E1000_STRAP_SMT_FREQ_SHIFT 12
184 /* OEM Bits Phy Register */
185 #define HV_OEM_BITS PHY_REG(768, 25)
186 #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
187 #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
188 #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
190 #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
191 #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
193 /* KMRN Mode Control */
194 #define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
195 #define HV_KMRN_MDIO_SLOW 0x0400
197 /* KMRN FIFO Control and Status */
198 #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
199 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
200 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
202 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
203 /* Offset 04h HSFSTS */
204 union ich8_hws_flash_status
{
206 u16 flcdone
:1; /* bit 0 Flash Cycle Done */
207 u16 flcerr
:1; /* bit 1 Flash Cycle Error */
208 u16 dael
:1; /* bit 2 Direct Access error Log */
209 u16 berasesz
:2; /* bit 4:3 Sector Erase Size */
210 u16 flcinprog
:1; /* bit 5 flash cycle in Progress */
211 u16 reserved1
:2; /* bit 13:6 Reserved */
212 u16 reserved2
:6; /* bit 13:6 Reserved */
213 u16 fldesvalid
:1; /* bit 14 Flash Descriptor Valid */
214 u16 flockdn
:1; /* bit 15 Flash Config Lock-Down */
219 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
220 /* Offset 06h FLCTL */
221 union ich8_hws_flash_ctrl
{
222 struct ich8_hsflctl
{
223 u16 flcgo
:1; /* 0 Flash Cycle Go */
224 u16 flcycle
:2; /* 2:1 Flash Cycle */
225 u16 reserved
:5; /* 7:3 Reserved */
226 u16 fldbcount
:2; /* 9:8 Flash Data Byte Count */
227 u16 flockdn
:6; /* 15:10 Reserved */
232 /* ICH Flash Region Access Permissions */
233 union ich8_hws_flash_regacc
{
235 u32 grra
:8; /* 0:7 GbE region Read Access */
236 u32 grwa
:8; /* 8:15 GbE region Write Access */
237 u32 gmrag
:8; /* 23:16 GbE Master Read Access Grant */
238 u32 gmwag
:8; /* 31:24 GbE Master Write Access Grant */
243 /* ICH Flash Protected Region */
244 union ich8_flash_protected_range
{
246 u32 base
:13; /* 0:12 Protected Range Base */
247 u32 reserved1
:2; /* 13:14 Reserved */
248 u32 rpe
:1; /* 15 Read Protection Enable */
249 u32 limit
:13; /* 16:28 Protected Range Limit */
250 u32 reserved2
:2; /* 29:30 Reserved */
251 u32 wpe
:1; /* 31 Write Protection Enable */
256 static s32
e1000_setup_link_ich8lan(struct e1000_hw
*hw
);
257 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw
*hw
);
258 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw
*hw
);
259 static s32
e1000_erase_flash_bank_ich8lan(struct e1000_hw
*hw
, u32 bank
);
260 static s32
e1000_retry_write_flash_byte_ich8lan(struct e1000_hw
*hw
,
261 u32 offset
, u8 byte
);
262 static s32
e1000_read_flash_byte_ich8lan(struct e1000_hw
*hw
, u32 offset
,
264 static s32
e1000_read_flash_word_ich8lan(struct e1000_hw
*hw
, u32 offset
,
266 static s32
e1000_read_flash_data_ich8lan(struct e1000_hw
*hw
, u32 offset
,
268 static s32
e1000_setup_copper_link_ich8lan(struct e1000_hw
*hw
);
269 static s32
e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw
*hw
);
270 static s32
e1000_get_cfg_done_ich8lan(struct e1000_hw
*hw
);
271 static s32
e1000_cleanup_led_ich8lan(struct e1000_hw
*hw
);
272 static s32
e1000_led_on_ich8lan(struct e1000_hw
*hw
);
273 static s32
e1000_led_off_ich8lan(struct e1000_hw
*hw
);
274 static s32
e1000_id_led_init_pchlan(struct e1000_hw
*hw
);
275 static s32
e1000_setup_led_pchlan(struct e1000_hw
*hw
);
276 static s32
e1000_cleanup_led_pchlan(struct e1000_hw
*hw
);
277 static s32
e1000_led_on_pchlan(struct e1000_hw
*hw
);
278 static s32
e1000_led_off_pchlan(struct e1000_hw
*hw
);
279 static s32
e1000_set_lplu_state_pchlan(struct e1000_hw
*hw
, bool active
);
280 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw
*hw
);
281 static void e1000_lan_init_done_ich8lan(struct e1000_hw
*hw
);
282 static s32
e1000_k1_gig_workaround_hv(struct e1000_hw
*hw
, bool link
);
283 static s32
e1000_set_mdio_slow_mode_hv(struct e1000_hw
*hw
);
284 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw
*hw
);
285 static bool e1000_check_mng_mode_pchlan(struct e1000_hw
*hw
);
286 static void e1000_rar_set_pch2lan(struct e1000_hw
*hw
, u8
*addr
, u32 index
);
287 static void e1000_rar_set_pch_lpt(struct e1000_hw
*hw
, u8
*addr
, u32 index
);
288 static s32
e1000_k1_workaround_lv(struct e1000_hw
*hw
);
289 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw
*hw
, bool gate
);
291 static inline u16
__er16flash(struct e1000_hw
*hw
, unsigned long reg
)
293 return readw(hw
->flash_address
+ reg
);
296 static inline u32
__er32flash(struct e1000_hw
*hw
, unsigned long reg
)
298 return readl(hw
->flash_address
+ reg
);
301 static inline void __ew16flash(struct e1000_hw
*hw
, unsigned long reg
, u16 val
)
303 writew(val
, hw
->flash_address
+ reg
);
306 static inline void __ew32flash(struct e1000_hw
*hw
, unsigned long reg
, u32 val
)
308 writel(val
, hw
->flash_address
+ reg
);
311 #define er16flash(reg) __er16flash(hw, (reg))
312 #define er32flash(reg) __er32flash(hw, (reg))
313 #define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
314 #define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
317 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
318 * @hw: pointer to the HW structure
320 * Test access to the PHY registers by reading the PHY ID registers. If
321 * the PHY ID is already known (e.g. resume path) compare it with known ID,
322 * otherwise assume the read PHY ID is correct if it is valid.
324 * Assumes the sw/fw/hw semaphore is already acquired.
326 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw
*hw
)
333 for (retry_count
= 0; retry_count
< 2; retry_count
++) {
334 ret_val
= e1e_rphy_locked(hw
, PHY_ID1
, &phy_reg
);
335 if (ret_val
|| (phy_reg
== 0xFFFF))
337 phy_id
= (u32
)(phy_reg
<< 16);
339 ret_val
= e1e_rphy_locked(hw
, PHY_ID2
, &phy_reg
);
340 if (ret_val
|| (phy_reg
== 0xFFFF)) {
344 phy_id
|= (u32
)(phy_reg
& PHY_REVISION_MASK
);
349 if (hw
->phy
.id
== phy_id
)
353 hw
->phy
.revision
= (u32
)(phy_reg
& ~PHY_REVISION_MASK
);
358 * In case the PHY needs to be in mdio slow mode,
359 * set slow mode and try to get the PHY id again.
361 hw
->phy
.ops
.release(hw
);
362 ret_val
= e1000_set_mdio_slow_mode_hv(hw
);
364 ret_val
= e1000e_get_phy_id(hw
);
365 hw
->phy
.ops
.acquire(hw
);
371 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
372 * @hw: pointer to the HW structure
374 * Workarounds/flow necessary for PHY initialization during driver load
377 static s32
e1000_init_phy_workarounds_pchlan(struct e1000_hw
*hw
)
379 u32 mac_reg
, fwsm
= er32(FWSM
);
383 ret_val
= hw
->phy
.ops
.acquire(hw
);
385 e_dbg("Failed to initialize PHY flow\n");
390 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
391 * inaccessible and resetting the PHY is not blocked, toggle the
392 * LANPHYPC Value bit to force the interconnect to PCIe mode.
394 switch (hw
->mac
.type
) {
396 if (e1000_phy_is_accessible_pchlan(hw
))
400 * Before toggling LANPHYPC, see if PHY is accessible by
401 * forcing MAC to SMBus mode first.
403 mac_reg
= er32(CTRL_EXT
);
404 mac_reg
|= E1000_CTRL_EXT_FORCE_SMBUS
;
405 ew32(CTRL_EXT
, mac_reg
);
410 * Gate automatic PHY configuration by hardware on
413 if ((hw
->mac
.type
== e1000_pch2lan
) &&
414 !(fwsm
& E1000_ICH_FWSM_FW_VALID
))
415 e1000_gate_hw_phy_config_ich8lan(hw
, true);
417 if (e1000_phy_is_accessible_pchlan(hw
)) {
418 if (hw
->mac
.type
== e1000_pch_lpt
) {
419 /* Unforce SMBus mode in PHY */
420 e1e_rphy_locked(hw
, CV_SMB_CTRL
, &phy_reg
);
421 phy_reg
&= ~CV_SMB_CTRL_FORCE_SMBUS
;
422 e1e_wphy_locked(hw
, CV_SMB_CTRL
, phy_reg
);
424 /* Unforce SMBus mode in MAC */
425 mac_reg
= er32(CTRL_EXT
);
426 mac_reg
&= ~E1000_CTRL_EXT_FORCE_SMBUS
;
427 ew32(CTRL_EXT
, mac_reg
);
434 if ((hw
->mac
.type
== e1000_pchlan
) &&
435 (fwsm
& E1000_ICH_FWSM_FW_VALID
))
438 if (hw
->phy
.ops
.check_reset_block(hw
)) {
439 e_dbg("Required LANPHYPC toggle blocked by ME\n");
443 e_dbg("Toggling LANPHYPC\n");
445 /* Set Phy Config Counter to 50msec */
446 mac_reg
= er32(FEXTNVM3
);
447 mac_reg
&= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK
;
448 mac_reg
|= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC
;
449 ew32(FEXTNVM3
, mac_reg
);
451 /* Toggle LANPHYPC Value bit */
452 mac_reg
= er32(CTRL
);
453 mac_reg
|= E1000_CTRL_LANPHYPC_OVERRIDE
;
454 mac_reg
&= ~E1000_CTRL_LANPHYPC_VALUE
;
458 mac_reg
&= ~E1000_CTRL_LANPHYPC_OVERRIDE
;
461 if (hw
->mac
.type
< e1000_pch_lpt
) {
466 usleep_range(5000, 10000);
467 } while (!(er32(CTRL_EXT
) &
468 E1000_CTRL_EXT_LPCD
) && count
--);
475 hw
->phy
.ops
.release(hw
);
478 * Reset the PHY before any access to it. Doing so, ensures
479 * that the PHY is in a known good state before we read/write
480 * PHY registers. The generic reset is sufficient here,
481 * because we haven't determined the PHY type yet.
483 ret_val
= e1000e_phy_hw_reset_generic(hw
);
485 /* Ungate automatic PHY configuration on non-managed 82579 */
486 if ((hw
->mac
.type
== e1000_pch2lan
) &&
487 !(fwsm
& E1000_ICH_FWSM_FW_VALID
)) {
488 usleep_range(10000, 20000);
489 e1000_gate_hw_phy_config_ich8lan(hw
, false);
496 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
497 * @hw: pointer to the HW structure
499 * Initialize family-specific PHY parameters and function pointers.
501 static s32
e1000_init_phy_params_pchlan(struct e1000_hw
*hw
)
503 struct e1000_phy_info
*phy
= &hw
->phy
;
507 phy
->reset_delay_us
= 100;
509 phy
->ops
.set_page
= e1000_set_page_igp
;
510 phy
->ops
.read_reg
= e1000_read_phy_reg_hv
;
511 phy
->ops
.read_reg_locked
= e1000_read_phy_reg_hv_locked
;
512 phy
->ops
.read_reg_page
= e1000_read_phy_reg_page_hv
;
513 phy
->ops
.set_d0_lplu_state
= e1000_set_lplu_state_pchlan
;
514 phy
->ops
.set_d3_lplu_state
= e1000_set_lplu_state_pchlan
;
515 phy
->ops
.write_reg
= e1000_write_phy_reg_hv
;
516 phy
->ops
.write_reg_locked
= e1000_write_phy_reg_hv_locked
;
517 phy
->ops
.write_reg_page
= e1000_write_phy_reg_page_hv
;
518 phy
->ops
.power_up
= e1000_power_up_phy_copper
;
519 phy
->ops
.power_down
= e1000_power_down_phy_copper_ich8lan
;
520 phy
->autoneg_mask
= AUTONEG_ADVERTISE_SPEED_DEFAULT
;
522 phy
->id
= e1000_phy_unknown
;
524 ret_val
= e1000_init_phy_workarounds_pchlan(hw
);
528 if (phy
->id
== e1000_phy_unknown
)
529 switch (hw
->mac
.type
) {
531 ret_val
= e1000e_get_phy_id(hw
);
534 if ((phy
->id
!= 0) && (phy
->id
!= PHY_REVISION_MASK
))
540 * In case the PHY needs to be in mdio slow mode,
541 * set slow mode and try to get the PHY id again.
543 ret_val
= e1000_set_mdio_slow_mode_hv(hw
);
546 ret_val
= e1000e_get_phy_id(hw
);
551 phy
->type
= e1000e_get_phy_type_from_id(phy
->id
);
554 case e1000_phy_82577
:
555 case e1000_phy_82579
:
557 phy
->ops
.check_polarity
= e1000_check_polarity_82577
;
558 phy
->ops
.force_speed_duplex
=
559 e1000_phy_force_speed_duplex_82577
;
560 phy
->ops
.get_cable_length
= e1000_get_cable_length_82577
;
561 phy
->ops
.get_info
= e1000_get_phy_info_82577
;
562 phy
->ops
.commit
= e1000e_phy_sw_reset
;
564 case e1000_phy_82578
:
565 phy
->ops
.check_polarity
= e1000_check_polarity_m88
;
566 phy
->ops
.force_speed_duplex
= e1000e_phy_force_speed_duplex_m88
;
567 phy
->ops
.get_cable_length
= e1000e_get_cable_length_m88
;
568 phy
->ops
.get_info
= e1000e_get_phy_info_m88
;
571 ret_val
= -E1000_ERR_PHY
;
579 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
580 * @hw: pointer to the HW structure
582 * Initialize family-specific PHY parameters and function pointers.
584 static s32
e1000_init_phy_params_ich8lan(struct e1000_hw
*hw
)
586 struct e1000_phy_info
*phy
= &hw
->phy
;
591 phy
->reset_delay_us
= 100;
593 phy
->ops
.power_up
= e1000_power_up_phy_copper
;
594 phy
->ops
.power_down
= e1000_power_down_phy_copper_ich8lan
;
597 * We may need to do this twice - once for IGP and if that fails,
598 * we'll set BM func pointers and try again
600 ret_val
= e1000e_determine_phy_address(hw
);
602 phy
->ops
.write_reg
= e1000e_write_phy_reg_bm
;
603 phy
->ops
.read_reg
= e1000e_read_phy_reg_bm
;
604 ret_val
= e1000e_determine_phy_address(hw
);
606 e_dbg("Cannot determine PHY addr. Erroring out\n");
612 while ((e1000_phy_unknown
== e1000e_get_phy_type_from_id(phy
->id
)) &&
614 usleep_range(1000, 2000);
615 ret_val
= e1000e_get_phy_id(hw
);
622 case IGP03E1000_E_PHY_ID
:
623 phy
->type
= e1000_phy_igp_3
;
624 phy
->autoneg_mask
= AUTONEG_ADVERTISE_SPEED_DEFAULT
;
625 phy
->ops
.read_reg_locked
= e1000e_read_phy_reg_igp_locked
;
626 phy
->ops
.write_reg_locked
= e1000e_write_phy_reg_igp_locked
;
627 phy
->ops
.get_info
= e1000e_get_phy_info_igp
;
628 phy
->ops
.check_polarity
= e1000_check_polarity_igp
;
629 phy
->ops
.force_speed_duplex
= e1000e_phy_force_speed_duplex_igp
;
632 case IFE_PLUS_E_PHY_ID
:
634 phy
->type
= e1000_phy_ife
;
635 phy
->autoneg_mask
= E1000_ALL_NOT_GIG
;
636 phy
->ops
.get_info
= e1000_get_phy_info_ife
;
637 phy
->ops
.check_polarity
= e1000_check_polarity_ife
;
638 phy
->ops
.force_speed_duplex
= e1000_phy_force_speed_duplex_ife
;
640 case BME1000_E_PHY_ID
:
641 phy
->type
= e1000_phy_bm
;
642 phy
->autoneg_mask
= AUTONEG_ADVERTISE_SPEED_DEFAULT
;
643 phy
->ops
.read_reg
= e1000e_read_phy_reg_bm
;
644 phy
->ops
.write_reg
= e1000e_write_phy_reg_bm
;
645 phy
->ops
.commit
= e1000e_phy_sw_reset
;
646 phy
->ops
.get_info
= e1000e_get_phy_info_m88
;
647 phy
->ops
.check_polarity
= e1000_check_polarity_m88
;
648 phy
->ops
.force_speed_duplex
= e1000e_phy_force_speed_duplex_m88
;
651 return -E1000_ERR_PHY
;
659 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
660 * @hw: pointer to the HW structure
662 * Initialize family-specific NVM parameters and function
665 static s32
e1000_init_nvm_params_ich8lan(struct e1000_hw
*hw
)
667 struct e1000_nvm_info
*nvm
= &hw
->nvm
;
668 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
669 u32 gfpreg
, sector_base_addr
, sector_end_addr
;
672 /* Can't read flash registers if the register set isn't mapped. */
673 if (!hw
->flash_address
) {
674 e_dbg("ERROR: Flash registers not mapped\n");
675 return -E1000_ERR_CONFIG
;
678 nvm
->type
= e1000_nvm_flash_sw
;
680 gfpreg
= er32flash(ICH_FLASH_GFPREG
);
683 * sector_X_addr is a "sector"-aligned address (4096 bytes)
684 * Add 1 to sector_end_addr since this sector is included in
687 sector_base_addr
= gfpreg
& FLASH_GFPREG_BASE_MASK
;
688 sector_end_addr
= ((gfpreg
>> 16) & FLASH_GFPREG_BASE_MASK
) + 1;
690 /* flash_base_addr is byte-aligned */
691 nvm
->flash_base_addr
= sector_base_addr
<< FLASH_SECTOR_ADDR_SHIFT
;
694 * find total size of the NVM, then cut in half since the total
695 * size represents two separate NVM banks.
697 nvm
->flash_bank_size
= (sector_end_addr
- sector_base_addr
)
698 << FLASH_SECTOR_ADDR_SHIFT
;
699 nvm
->flash_bank_size
/= 2;
700 /* Adjust to word count */
701 nvm
->flash_bank_size
/= sizeof(u16
);
703 nvm
->word_size
= E1000_ICH8_SHADOW_RAM_WORDS
;
705 /* Clear shadow ram */
706 for (i
= 0; i
< nvm
->word_size
; i
++) {
707 dev_spec
->shadow_ram
[i
].modified
= false;
708 dev_spec
->shadow_ram
[i
].value
= 0xFFFF;
715 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
716 * @hw: pointer to the HW structure
718 * Initialize family-specific MAC parameters and function
721 static s32
e1000_init_mac_params_ich8lan(struct e1000_hw
*hw
)
723 struct e1000_mac_info
*mac
= &hw
->mac
;
725 /* Set media type function pointer */
726 hw
->phy
.media_type
= e1000_media_type_copper
;
728 /* Set mta register count */
729 mac
->mta_reg_count
= 32;
730 /* Set rar entry count */
731 mac
->rar_entry_count
= E1000_ICH_RAR_ENTRIES
;
732 if (mac
->type
== e1000_ich8lan
)
733 mac
->rar_entry_count
--;
735 mac
->has_fwsm
= true;
736 /* ARC subsystem not supported */
737 mac
->arc_subsystem_valid
= false;
738 /* Adaptive IFS supported */
739 mac
->adaptive_ifs
= true;
741 /* LED and other operations */
746 /* check management mode */
747 mac
->ops
.check_mng_mode
= e1000_check_mng_mode_ich8lan
;
749 mac
->ops
.id_led_init
= e1000e_id_led_init_generic
;
751 mac
->ops
.blink_led
= e1000e_blink_led_generic
;
753 mac
->ops
.setup_led
= e1000e_setup_led_generic
;
755 mac
->ops
.cleanup_led
= e1000_cleanup_led_ich8lan
;
756 /* turn on/off LED */
757 mac
->ops
.led_on
= e1000_led_on_ich8lan
;
758 mac
->ops
.led_off
= e1000_led_off_ich8lan
;
761 mac
->rar_entry_count
= E1000_PCH2_RAR_ENTRIES
;
762 mac
->ops
.rar_set
= e1000_rar_set_pch2lan
;
766 /* check management mode */
767 mac
->ops
.check_mng_mode
= e1000_check_mng_mode_pchlan
;
769 mac
->ops
.id_led_init
= e1000_id_led_init_pchlan
;
771 mac
->ops
.setup_led
= e1000_setup_led_pchlan
;
773 mac
->ops
.cleanup_led
= e1000_cleanup_led_pchlan
;
774 /* turn on/off LED */
775 mac
->ops
.led_on
= e1000_led_on_pchlan
;
776 mac
->ops
.led_off
= e1000_led_off_pchlan
;
782 if (mac
->type
== e1000_pch_lpt
) {
783 mac
->rar_entry_count
= E1000_PCH_LPT_RAR_ENTRIES
;
784 mac
->ops
.rar_set
= e1000_rar_set_pch_lpt
;
787 /* Enable PCS Lock-loss workaround for ICH8 */
788 if (mac
->type
== e1000_ich8lan
)
789 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw
, true);
792 * Gate automatic PHY configuration by hardware on managed
795 if ((mac
->type
== e1000_pch2lan
|| mac
->type
== e1000_pch_lpt
) &&
796 (er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
))
797 e1000_gate_hw_phy_config_ich8lan(hw
, true);
803 * e1000_set_eee_pchlan - Enable/disable EEE support
804 * @hw: pointer to the HW structure
806 * Enable/disable EEE based on setting in dev_spec structure. The bits in
807 * the LPI Control register will remain set only if/when link is up.
809 static s32
e1000_set_eee_pchlan(struct e1000_hw
*hw
)
811 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
815 if ((hw
->phy
.type
!= e1000_phy_82579
) &&
816 (hw
->phy
.type
!= e1000_phy_i217
))
819 ret_val
= e1e_rphy(hw
, I82579_LPI_CTRL
, &phy_reg
);
823 if (dev_spec
->eee_disable
)
824 phy_reg
&= ~I82579_LPI_CTRL_ENABLE_MASK
;
826 phy_reg
|= I82579_LPI_CTRL_ENABLE_MASK
;
828 ret_val
= e1e_wphy(hw
, I82579_LPI_CTRL
, phy_reg
);
832 if ((hw
->phy
.type
== e1000_phy_i217
) && !dev_spec
->eee_disable
) {
833 /* Save off link partner's EEE ability */
834 ret_val
= hw
->phy
.ops
.acquire(hw
);
837 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_ADDR
,
838 I217_EEE_LP_ABILITY
);
841 e1e_rphy_locked(hw
, I82579_EMI_DATA
, &dev_spec
->eee_lp_ability
);
844 * EEE is not supported in 100Half, so ignore partner's EEE
845 * in 100 ability if full-duplex is not advertised.
847 e1e_rphy_locked(hw
, PHY_LP_ABILITY
, &phy_reg
);
848 if (!(phy_reg
& NWAY_LPAR_100TX_FD_CAPS
))
849 dev_spec
->eee_lp_ability
&= ~I217_EEE_100_SUPPORTED
;
851 hw
->phy
.ops
.release(hw
);
858 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
859 * @hw: pointer to the HW structure
861 * Checks to see of the link status of the hardware has changed. If a
862 * change in link status has been detected, then we read the PHY registers
863 * to get the current speed/duplex if link exists.
865 static s32
e1000_check_for_copper_link_ich8lan(struct e1000_hw
*hw
)
867 struct e1000_mac_info
*mac
= &hw
->mac
;
873 * We only want to go out to the PHY registers to see if Auto-Neg
874 * has completed and/or if our link status has changed. The
875 * get_link_status flag is set upon receiving a Link Status
876 * Change or Rx Sequence Error interrupt.
878 if (!mac
->get_link_status
)
882 * First we want to see if the MII Status Register reports
883 * link. If so, then we want to get the current speed/duplex
886 ret_val
= e1000e_phy_has_link_generic(hw
, 1, 0, &link
);
890 if (hw
->mac
.type
== e1000_pchlan
) {
891 ret_val
= e1000_k1_gig_workaround_hv(hw
, link
);
896 /* Clear link partner's EEE ability */
897 hw
->dev_spec
.ich8lan
.eee_lp_ability
= 0;
900 return 0; /* No link detected */
902 mac
->get_link_status
= false;
904 switch (hw
->mac
.type
) {
906 ret_val
= e1000_k1_workaround_lv(hw
);
911 if (hw
->phy
.type
== e1000_phy_82578
) {
912 ret_val
= e1000_link_stall_workaround_hv(hw
);
918 * Workaround for PCHx parts in half-duplex:
919 * Set the number of preambles removed from the packet
920 * when it is passed from the PHY to the MAC to prevent
921 * the MAC from misinterpreting the packet type.
923 e1e_rphy(hw
, HV_KMRN_FIFO_CTRLSTA
, &phy_reg
);
924 phy_reg
&= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK
;
926 if ((er32(STATUS
) & E1000_STATUS_FD
) != E1000_STATUS_FD
)
927 phy_reg
|= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT
);
929 e1e_wphy(hw
, HV_KMRN_FIFO_CTRLSTA
, phy_reg
);
936 * Check if there was DownShift, must be checked
937 * immediately after link-up
939 e1000e_check_downshift(hw
);
941 /* Enable/Disable EEE after link up */
942 ret_val
= e1000_set_eee_pchlan(hw
);
947 * If we are forcing speed/duplex, then we simply return since
948 * we have already determined whether we have link or not.
951 return -E1000_ERR_CONFIG
;
954 * Auto-Neg is enabled. Auto Speed Detection takes care
955 * of MAC speed/duplex configuration. So we only need to
956 * configure Collision Distance in the MAC.
958 mac
->ops
.config_collision_dist(hw
);
961 * Configure Flow Control now that Auto-Neg has completed.
962 * First, we need to restore the desired flow control
963 * settings because we may have had to re-autoneg with a
964 * different link partner.
966 ret_val
= e1000e_config_fc_after_link_up(hw
);
968 e_dbg("Error configuring flow control\n");
973 static s32
e1000_get_variants_ich8lan(struct e1000_adapter
*adapter
)
975 struct e1000_hw
*hw
= &adapter
->hw
;
978 rc
= e1000_init_mac_params_ich8lan(hw
);
982 rc
= e1000_init_nvm_params_ich8lan(hw
);
986 switch (hw
->mac
.type
) {
990 rc
= e1000_init_phy_params_ich8lan(hw
);
995 rc
= e1000_init_phy_params_pchlan(hw
);
1004 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1005 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1007 if ((adapter
->hw
.phy
.type
== e1000_phy_ife
) ||
1008 ((adapter
->hw
.mac
.type
>= e1000_pch2lan
) &&
1009 (!(er32(CTRL_EXT
) & E1000_CTRL_EXT_LSECCK
)))) {
1010 adapter
->flags
&= ~FLAG_HAS_JUMBO_FRAMES
;
1011 adapter
->max_hw_frame_size
= ETH_FRAME_LEN
+ ETH_FCS_LEN
;
1013 hw
->mac
.ops
.blink_led
= NULL
;
1016 if ((adapter
->hw
.mac
.type
== e1000_ich8lan
) &&
1017 (adapter
->hw
.phy
.type
!= e1000_phy_ife
))
1018 adapter
->flags
|= FLAG_LSC_GIG_SPEED_DROP
;
1020 /* Enable workaround for 82579 w/ ME enabled */
1021 if ((adapter
->hw
.mac
.type
== e1000_pch2lan
) &&
1022 (er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
))
1023 adapter
->flags2
|= FLAG2_PCIM2PCI_ARBITER_WA
;
1025 /* Disable EEE by default until IEEE802.3az spec is finalized */
1026 if (adapter
->flags2
& FLAG2_HAS_EEE
)
1027 adapter
->hw
.dev_spec
.ich8lan
.eee_disable
= true;
1032 static DEFINE_MUTEX(nvm_mutex
);
1035 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1036 * @hw: pointer to the HW structure
1038 * Acquires the mutex for performing NVM operations.
1040 static s32
e1000_acquire_nvm_ich8lan(struct e1000_hw
*hw
)
1042 mutex_lock(&nvm_mutex
);
1048 * e1000_release_nvm_ich8lan - Release NVM mutex
1049 * @hw: pointer to the HW structure
1051 * Releases the mutex used while performing NVM operations.
1053 static void e1000_release_nvm_ich8lan(struct e1000_hw
*hw
)
1055 mutex_unlock(&nvm_mutex
);
1059 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1060 * @hw: pointer to the HW structure
1062 * Acquires the software control flag for performing PHY and select
1065 static s32
e1000_acquire_swflag_ich8lan(struct e1000_hw
*hw
)
1067 u32 extcnf_ctrl
, timeout
= PHY_CFG_TIMEOUT
;
1070 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE
,
1071 &hw
->adapter
->state
)) {
1072 e_dbg("contention for Phy access\n");
1073 return -E1000_ERR_PHY
;
1077 extcnf_ctrl
= er32(EXTCNF_CTRL
);
1078 if (!(extcnf_ctrl
& E1000_EXTCNF_CTRL_SWFLAG
))
1086 e_dbg("SW has already locked the resource.\n");
1087 ret_val
= -E1000_ERR_CONFIG
;
1091 timeout
= SW_FLAG_TIMEOUT
;
1093 extcnf_ctrl
|= E1000_EXTCNF_CTRL_SWFLAG
;
1094 ew32(EXTCNF_CTRL
, extcnf_ctrl
);
1097 extcnf_ctrl
= er32(EXTCNF_CTRL
);
1098 if (extcnf_ctrl
& E1000_EXTCNF_CTRL_SWFLAG
)
1106 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1107 er32(FWSM
), extcnf_ctrl
);
1108 extcnf_ctrl
&= ~E1000_EXTCNF_CTRL_SWFLAG
;
1109 ew32(EXTCNF_CTRL
, extcnf_ctrl
);
1110 ret_val
= -E1000_ERR_CONFIG
;
1116 clear_bit(__E1000_ACCESS_SHARED_RESOURCE
, &hw
->adapter
->state
);
1122 * e1000_release_swflag_ich8lan - Release software control flag
1123 * @hw: pointer to the HW structure
1125 * Releases the software control flag for performing PHY and select
1128 static void e1000_release_swflag_ich8lan(struct e1000_hw
*hw
)
1132 extcnf_ctrl
= er32(EXTCNF_CTRL
);
1134 if (extcnf_ctrl
& E1000_EXTCNF_CTRL_SWFLAG
) {
1135 extcnf_ctrl
&= ~E1000_EXTCNF_CTRL_SWFLAG
;
1136 ew32(EXTCNF_CTRL
, extcnf_ctrl
);
1138 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1141 clear_bit(__E1000_ACCESS_SHARED_RESOURCE
, &hw
->adapter
->state
);
1145 * e1000_check_mng_mode_ich8lan - Checks management mode
1146 * @hw: pointer to the HW structure
1148 * This checks if the adapter has any manageability enabled.
1149 * This is a function pointer entry point only called by read/write
1150 * routines for the PHY and NVM parts.
1152 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw
*hw
)
1157 return (fwsm
& E1000_ICH_FWSM_FW_VALID
) &&
1158 ((fwsm
& E1000_FWSM_MODE_MASK
) ==
1159 (E1000_ICH_MNG_IAMT_MODE
<< E1000_FWSM_MODE_SHIFT
));
1163 * e1000_check_mng_mode_pchlan - Checks management mode
1164 * @hw: pointer to the HW structure
1166 * This checks if the adapter has iAMT enabled.
1167 * This is a function pointer entry point only called by read/write
1168 * routines for the PHY and NVM parts.
1170 static bool e1000_check_mng_mode_pchlan(struct e1000_hw
*hw
)
1175 return (fwsm
& E1000_ICH_FWSM_FW_VALID
) &&
1176 (fwsm
& (E1000_ICH_MNG_IAMT_MODE
<< E1000_FWSM_MODE_SHIFT
));
1180 * e1000_rar_set_pch2lan - Set receive address register
1181 * @hw: pointer to the HW structure
1182 * @addr: pointer to the receive address
1183 * @index: receive address array register
1185 * Sets the receive address array register at index to the address passed
1186 * in by addr. For 82579, RAR[0] is the base address register that is to
1187 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1188 * Use SHRA[0-3] in place of those reserved for ME.
1190 static void e1000_rar_set_pch2lan(struct e1000_hw
*hw
, u8
*addr
, u32 index
)
1192 u32 rar_low
, rar_high
;
1195 * HW expects these in little endian so we reverse the byte order
1196 * from network order (big endian) to little endian
1198 rar_low
= ((u32
)addr
[0] |
1199 ((u32
)addr
[1] << 8) |
1200 ((u32
)addr
[2] << 16) | ((u32
)addr
[3] << 24));
1202 rar_high
= ((u32
)addr
[4] | ((u32
)addr
[5] << 8));
1204 /* If MAC address zero, no need to set the AV bit */
1205 if (rar_low
|| rar_high
)
1206 rar_high
|= E1000_RAH_AV
;
1209 ew32(RAL(index
), rar_low
);
1211 ew32(RAH(index
), rar_high
);
1216 if (index
< hw
->mac
.rar_entry_count
) {
1219 ret_val
= e1000_acquire_swflag_ich8lan(hw
);
1223 ew32(SHRAL(index
- 1), rar_low
);
1225 ew32(SHRAH(index
- 1), rar_high
);
1228 e1000_release_swflag_ich8lan(hw
);
1230 /* verify the register updates */
1231 if ((er32(SHRAL(index
- 1)) == rar_low
) &&
1232 (er32(SHRAH(index
- 1)) == rar_high
))
1235 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1236 (index
- 1), er32(FWSM
));
1240 e_dbg("Failed to write receive address at index %d\n", index
);
1244 * e1000_rar_set_pch_lpt - Set receive address registers
1245 * @hw: pointer to the HW structure
1246 * @addr: pointer to the receive address
1247 * @index: receive address array register
1249 * Sets the receive address register array at index to the address passed
1250 * in by addr. For LPT, RAR[0] is the base address register that is to
1251 * contain the MAC address. SHRA[0-10] are the shared receive address
1252 * registers that are shared between the Host and manageability engine (ME).
1254 static void e1000_rar_set_pch_lpt(struct e1000_hw
*hw
, u8
*addr
, u32 index
)
1256 u32 rar_low
, rar_high
;
1260 * HW expects these in little endian so we reverse the byte order
1261 * from network order (big endian) to little endian
1263 rar_low
= ((u32
)addr
[0] | ((u32
)addr
[1] << 8) |
1264 ((u32
)addr
[2] << 16) | ((u32
)addr
[3] << 24));
1266 rar_high
= ((u32
)addr
[4] | ((u32
)addr
[5] << 8));
1268 /* If MAC address zero, no need to set the AV bit */
1269 if (rar_low
|| rar_high
)
1270 rar_high
|= E1000_RAH_AV
;
1273 ew32(RAL(index
), rar_low
);
1275 ew32(RAH(index
), rar_high
);
1281 * The manageability engine (ME) can lock certain SHRAR registers that
1282 * it is using - those registers are unavailable for use.
1284 if (index
< hw
->mac
.rar_entry_count
) {
1285 wlock_mac
= er32(FWSM
) & E1000_FWSM_WLOCK_MAC_MASK
;
1286 wlock_mac
>>= E1000_FWSM_WLOCK_MAC_SHIFT
;
1288 /* Check if all SHRAR registers are locked */
1292 if ((wlock_mac
== 0) || (index
<= wlock_mac
)) {
1295 ret_val
= e1000_acquire_swflag_ich8lan(hw
);
1300 ew32(SHRAL_PCH_LPT(index
- 1), rar_low
);
1302 ew32(SHRAH_PCH_LPT(index
- 1), rar_high
);
1305 e1000_release_swflag_ich8lan(hw
);
1307 /* verify the register updates */
1308 if ((er32(SHRAL_PCH_LPT(index
- 1)) == rar_low
) &&
1309 (er32(SHRAH_PCH_LPT(index
- 1)) == rar_high
))
1315 e_dbg("Failed to write receive address at index %d\n", index
);
1319 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1320 * @hw: pointer to the HW structure
1322 * Checks if firmware is blocking the reset of the PHY.
1323 * This is a function pointer entry point only called by
1326 static s32
e1000_check_reset_block_ich8lan(struct e1000_hw
*hw
)
1332 return (fwsm
& E1000_ICH_FWSM_RSPCIPHY
) ? 0 : E1000_BLK_PHY_RESET
;
1336 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1337 * @hw: pointer to the HW structure
1339 * Assumes semaphore already acquired.
1342 static s32
e1000_write_smbus_addr(struct e1000_hw
*hw
)
1345 u32 strap
= er32(STRAP
);
1346 u32 freq
= (strap
& E1000_STRAP_SMT_FREQ_MASK
) >>
1347 E1000_STRAP_SMT_FREQ_SHIFT
;
1350 strap
&= E1000_STRAP_SMBUS_ADDRESS_MASK
;
1352 ret_val
= e1000_read_phy_reg_hv_locked(hw
, HV_SMB_ADDR
, &phy_data
);
1356 phy_data
&= ~HV_SMB_ADDR_MASK
;
1357 phy_data
|= (strap
>> E1000_STRAP_SMBUS_ADDRESS_SHIFT
);
1358 phy_data
|= HV_SMB_ADDR_PEC_EN
| HV_SMB_ADDR_VALID
;
1360 if (hw
->phy
.type
== e1000_phy_i217
) {
1361 /* Restore SMBus frequency */
1363 phy_data
&= ~HV_SMB_ADDR_FREQ_MASK
;
1364 phy_data
|= (freq
& (1 << 0)) <<
1365 HV_SMB_ADDR_FREQ_LOW_SHIFT
;
1366 phy_data
|= (freq
& (1 << 1)) <<
1367 (HV_SMB_ADDR_FREQ_HIGH_SHIFT
- 1);
1369 e_dbg("Unsupported SMB frequency in PHY\n");
1373 return e1000_write_phy_reg_hv_locked(hw
, HV_SMB_ADDR
, phy_data
);
1377 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1378 * @hw: pointer to the HW structure
1380 * SW should configure the LCD from the NVM extended configuration region
1381 * as a workaround for certain parts.
1383 static s32
e1000_sw_lcd_config_ich8lan(struct e1000_hw
*hw
)
1385 struct e1000_phy_info
*phy
= &hw
->phy
;
1386 u32 i
, data
, cnf_size
, cnf_base_addr
, sw_cfg_mask
;
1388 u16 word_addr
, reg_data
, reg_addr
, phy_page
= 0;
1391 * Initialize the PHY from the NVM on ICH platforms. This
1392 * is needed due to an issue where the NVM configuration is
1393 * not properly autoloaded after power transitions.
1394 * Therefore, after each PHY reset, we will load the
1395 * configuration data out of the NVM manually.
1397 switch (hw
->mac
.type
) {
1399 if (phy
->type
!= e1000_phy_igp_3
)
1402 if ((hw
->adapter
->pdev
->device
== E1000_DEV_ID_ICH8_IGP_AMT
) ||
1403 (hw
->adapter
->pdev
->device
== E1000_DEV_ID_ICH8_IGP_C
)) {
1404 sw_cfg_mask
= E1000_FEXTNVM_SW_CONFIG
;
1411 sw_cfg_mask
= E1000_FEXTNVM_SW_CONFIG_ICH8M
;
1417 ret_val
= hw
->phy
.ops
.acquire(hw
);
1421 data
= er32(FEXTNVM
);
1422 if (!(data
& sw_cfg_mask
))
1426 * Make sure HW does not configure LCD from PHY
1427 * extended configuration before SW configuration
1429 data
= er32(EXTCNF_CTRL
);
1430 if ((hw
->mac
.type
< e1000_pch2lan
) &&
1431 (data
& E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE
))
1434 cnf_size
= er32(EXTCNF_SIZE
);
1435 cnf_size
&= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK
;
1436 cnf_size
>>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT
;
1440 cnf_base_addr
= data
& E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK
;
1441 cnf_base_addr
>>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT
;
1443 if (((hw
->mac
.type
== e1000_pchlan
) &&
1444 !(data
& E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE
)) ||
1445 (hw
->mac
.type
> e1000_pchlan
)) {
1447 * HW configures the SMBus address and LEDs when the
1448 * OEM and LCD Write Enable bits are set in the NVM.
1449 * When both NVM bits are cleared, SW will configure
1452 ret_val
= e1000_write_smbus_addr(hw
);
1456 data
= er32(LEDCTL
);
1457 ret_val
= e1000_write_phy_reg_hv_locked(hw
, HV_LED_CONFIG
,
1463 /* Configure LCD from extended configuration region. */
1465 /* cnf_base_addr is in DWORD */
1466 word_addr
= (u16
)(cnf_base_addr
<< 1);
1468 for (i
= 0; i
< cnf_size
; i
++) {
1469 ret_val
= e1000_read_nvm(hw
, (word_addr
+ i
* 2), 1,
1474 ret_val
= e1000_read_nvm(hw
, (word_addr
+ i
* 2 + 1),
1479 /* Save off the PHY page for future writes. */
1480 if (reg_addr
== IGP01E1000_PHY_PAGE_SELECT
) {
1481 phy_page
= reg_data
;
1485 reg_addr
&= PHY_REG_MASK
;
1486 reg_addr
|= phy_page
;
1488 ret_val
= e1e_wphy_locked(hw
, (u32
)reg_addr
, reg_data
);
1494 hw
->phy
.ops
.release(hw
);
1499 * e1000_k1_gig_workaround_hv - K1 Si workaround
1500 * @hw: pointer to the HW structure
1501 * @link: link up bool flag
1503 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1504 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1505 * If link is down, the function will restore the default K1 setting located
1508 static s32
e1000_k1_gig_workaround_hv(struct e1000_hw
*hw
, bool link
)
1512 bool k1_enable
= hw
->dev_spec
.ich8lan
.nvm_k1_enabled
;
1514 if (hw
->mac
.type
!= e1000_pchlan
)
1517 /* Wrap the whole flow with the sw flag */
1518 ret_val
= hw
->phy
.ops
.acquire(hw
);
1522 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1524 if (hw
->phy
.type
== e1000_phy_82578
) {
1525 ret_val
= e1e_rphy_locked(hw
, BM_CS_STATUS
,
1530 status_reg
&= BM_CS_STATUS_LINK_UP
|
1531 BM_CS_STATUS_RESOLVED
|
1532 BM_CS_STATUS_SPEED_MASK
;
1534 if (status_reg
== (BM_CS_STATUS_LINK_UP
|
1535 BM_CS_STATUS_RESOLVED
|
1536 BM_CS_STATUS_SPEED_1000
))
1540 if (hw
->phy
.type
== e1000_phy_82577
) {
1541 ret_val
= e1e_rphy_locked(hw
, HV_M_STATUS
, &status_reg
);
1545 status_reg
&= HV_M_STATUS_LINK_UP
|
1546 HV_M_STATUS_AUTONEG_COMPLETE
|
1547 HV_M_STATUS_SPEED_MASK
;
1549 if (status_reg
== (HV_M_STATUS_LINK_UP
|
1550 HV_M_STATUS_AUTONEG_COMPLETE
|
1551 HV_M_STATUS_SPEED_1000
))
1555 /* Link stall fix for link up */
1556 ret_val
= e1e_wphy_locked(hw
, PHY_REG(770, 19), 0x0100);
1561 /* Link stall fix for link down */
1562 ret_val
= e1e_wphy_locked(hw
, PHY_REG(770, 19), 0x4100);
1567 ret_val
= e1000_configure_k1_ich8lan(hw
, k1_enable
);
1570 hw
->phy
.ops
.release(hw
);
1576 * e1000_configure_k1_ich8lan - Configure K1 power state
1577 * @hw: pointer to the HW structure
1578 * @enable: K1 state to configure
1580 * Configure the K1 power state based on the provided parameter.
1581 * Assumes semaphore already acquired.
1583 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1585 s32
e1000_configure_k1_ich8lan(struct e1000_hw
*hw
, bool k1_enable
)
1593 ret_val
= e1000e_read_kmrn_reg_locked(hw
, E1000_KMRNCTRLSTA_K1_CONFIG
,
1599 kmrn_reg
|= E1000_KMRNCTRLSTA_K1_ENABLE
;
1601 kmrn_reg
&= ~E1000_KMRNCTRLSTA_K1_ENABLE
;
1603 ret_val
= e1000e_write_kmrn_reg_locked(hw
, E1000_KMRNCTRLSTA_K1_CONFIG
,
1609 ctrl_ext
= er32(CTRL_EXT
);
1610 ctrl_reg
= er32(CTRL
);
1612 reg
= ctrl_reg
& ~(E1000_CTRL_SPD_1000
| E1000_CTRL_SPD_100
);
1613 reg
|= E1000_CTRL_FRCSPD
;
1616 ew32(CTRL_EXT
, ctrl_ext
| E1000_CTRL_EXT_SPD_BYPS
);
1619 ew32(CTRL
, ctrl_reg
);
1620 ew32(CTRL_EXT
, ctrl_ext
);
1628 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1629 * @hw: pointer to the HW structure
1630 * @d0_state: boolean if entering d0 or d3 device state
1632 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1633 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1634 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1636 static s32
e1000_oem_bits_config_ich8lan(struct e1000_hw
*hw
, bool d0_state
)
1642 if (hw
->mac
.type
< e1000_pchlan
)
1645 ret_val
= hw
->phy
.ops
.acquire(hw
);
1649 if (hw
->mac
.type
== e1000_pchlan
) {
1650 mac_reg
= er32(EXTCNF_CTRL
);
1651 if (mac_reg
& E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE
)
1655 mac_reg
= er32(FEXTNVM
);
1656 if (!(mac_reg
& E1000_FEXTNVM_SW_CONFIG_ICH8M
))
1659 mac_reg
= er32(PHY_CTRL
);
1661 ret_val
= e1e_rphy_locked(hw
, HV_OEM_BITS
, &oem_reg
);
1665 oem_reg
&= ~(HV_OEM_BITS_GBE_DIS
| HV_OEM_BITS_LPLU
);
1668 if (mac_reg
& E1000_PHY_CTRL_GBE_DISABLE
)
1669 oem_reg
|= HV_OEM_BITS_GBE_DIS
;
1671 if (mac_reg
& E1000_PHY_CTRL_D0A_LPLU
)
1672 oem_reg
|= HV_OEM_BITS_LPLU
;
1674 if (mac_reg
& (E1000_PHY_CTRL_GBE_DISABLE
|
1675 E1000_PHY_CTRL_NOND0A_GBE_DISABLE
))
1676 oem_reg
|= HV_OEM_BITS_GBE_DIS
;
1678 if (mac_reg
& (E1000_PHY_CTRL_D0A_LPLU
|
1679 E1000_PHY_CTRL_NOND0A_LPLU
))
1680 oem_reg
|= HV_OEM_BITS_LPLU
;
1683 /* Set Restart auto-neg to activate the bits */
1684 if ((d0_state
|| (hw
->mac
.type
!= e1000_pchlan
)) &&
1685 !hw
->phy
.ops
.check_reset_block(hw
))
1686 oem_reg
|= HV_OEM_BITS_RESTART_AN
;
1688 ret_val
= e1e_wphy_locked(hw
, HV_OEM_BITS
, oem_reg
);
1691 hw
->phy
.ops
.release(hw
);
1698 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1699 * @hw: pointer to the HW structure
1701 static s32
e1000_set_mdio_slow_mode_hv(struct e1000_hw
*hw
)
1706 ret_val
= e1e_rphy(hw
, HV_KMRN_MODE_CTRL
, &data
);
1710 data
|= HV_KMRN_MDIO_SLOW
;
1712 ret_val
= e1e_wphy(hw
, HV_KMRN_MODE_CTRL
, data
);
1718 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1719 * done after every PHY reset.
1721 static s32
e1000_hv_phy_workarounds_ich8lan(struct e1000_hw
*hw
)
1726 if (hw
->mac
.type
!= e1000_pchlan
)
1729 /* Set MDIO slow mode before any other MDIO access */
1730 if (hw
->phy
.type
== e1000_phy_82577
) {
1731 ret_val
= e1000_set_mdio_slow_mode_hv(hw
);
1736 if (((hw
->phy
.type
== e1000_phy_82577
) &&
1737 ((hw
->phy
.revision
== 1) || (hw
->phy
.revision
== 2))) ||
1738 ((hw
->phy
.type
== e1000_phy_82578
) && (hw
->phy
.revision
== 1))) {
1739 /* Disable generation of early preamble */
1740 ret_val
= e1e_wphy(hw
, PHY_REG(769, 25), 0x4431);
1744 /* Preamble tuning for SSC */
1745 ret_val
= e1e_wphy(hw
, HV_KMRN_FIFO_CTRLSTA
, 0xA204);
1750 if (hw
->phy
.type
== e1000_phy_82578
) {
1752 * Return registers to default by doing a soft reset then
1753 * writing 0x3140 to the control register.
1755 if (hw
->phy
.revision
< 2) {
1756 e1000e_phy_sw_reset(hw
);
1757 ret_val
= e1e_wphy(hw
, PHY_CONTROL
, 0x3140);
1762 ret_val
= hw
->phy
.ops
.acquire(hw
);
1767 ret_val
= e1000e_write_phy_reg_mdic(hw
, IGP01E1000_PHY_PAGE_SELECT
, 0);
1768 hw
->phy
.ops
.release(hw
);
1773 * Configure the K1 Si workaround during phy reset assuming there is
1774 * link so that it disables K1 if link is in 1Gbps.
1776 ret_val
= e1000_k1_gig_workaround_hv(hw
, true);
1780 /* Workaround for link disconnects on a busy hub in half duplex */
1781 ret_val
= hw
->phy
.ops
.acquire(hw
);
1784 ret_val
= e1e_rphy_locked(hw
, BM_PORT_GEN_CFG
, &phy_data
);
1787 ret_val
= e1e_wphy_locked(hw
, BM_PORT_GEN_CFG
, phy_data
& 0x00FF);
1789 hw
->phy
.ops
.release(hw
);
1795 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1796 * @hw: pointer to the HW structure
1798 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw
*hw
)
1804 ret_val
= hw
->phy
.ops
.acquire(hw
);
1807 ret_val
= e1000_enable_phy_wakeup_reg_access_bm(hw
, &phy_reg
);
1811 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1812 for (i
= 0; i
< (hw
->mac
.rar_entry_count
+ 4); i
++) {
1813 mac_reg
= er32(RAL(i
));
1814 hw
->phy
.ops
.write_reg_page(hw
, BM_RAR_L(i
),
1815 (u16
)(mac_reg
& 0xFFFF));
1816 hw
->phy
.ops
.write_reg_page(hw
, BM_RAR_M(i
),
1817 (u16
)((mac_reg
>> 16) & 0xFFFF));
1819 mac_reg
= er32(RAH(i
));
1820 hw
->phy
.ops
.write_reg_page(hw
, BM_RAR_H(i
),
1821 (u16
)(mac_reg
& 0xFFFF));
1822 hw
->phy
.ops
.write_reg_page(hw
, BM_RAR_CTRL(i
),
1823 (u16
)((mac_reg
& E1000_RAH_AV
)
1827 e1000_disable_phy_wakeup_reg_access_bm(hw
, &phy_reg
);
1830 hw
->phy
.ops
.release(hw
);
1834 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1836 * @hw: pointer to the HW structure
1837 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1839 s32
e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw
*hw
, bool enable
)
1846 if (hw
->mac
.type
< e1000_pch2lan
)
1849 /* disable Rx path while enabling/disabling workaround */
1850 e1e_rphy(hw
, PHY_REG(769, 20), &phy_reg
);
1851 ret_val
= e1e_wphy(hw
, PHY_REG(769, 20), phy_reg
| (1 << 14));
1857 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1858 * SHRAL/H) and initial CRC values to the MAC
1860 for (i
= 0; i
< (hw
->mac
.rar_entry_count
+ 4); i
++) {
1861 u8 mac_addr
[ETH_ALEN
] = {0};
1862 u32 addr_high
, addr_low
;
1864 addr_high
= er32(RAH(i
));
1865 if (!(addr_high
& E1000_RAH_AV
))
1867 addr_low
= er32(RAL(i
));
1868 mac_addr
[0] = (addr_low
& 0xFF);
1869 mac_addr
[1] = ((addr_low
>> 8) & 0xFF);
1870 mac_addr
[2] = ((addr_low
>> 16) & 0xFF);
1871 mac_addr
[3] = ((addr_low
>> 24) & 0xFF);
1872 mac_addr
[4] = (addr_high
& 0xFF);
1873 mac_addr
[5] = ((addr_high
>> 8) & 0xFF);
1875 ew32(PCH_RAICC(i
), ~ether_crc_le(ETH_ALEN
, mac_addr
));
1878 /* Write Rx addresses to the PHY */
1879 e1000_copy_rx_addrs_to_phy_ich8lan(hw
);
1881 /* Enable jumbo frame workaround in the MAC */
1882 mac_reg
= er32(FFLT_DBG
);
1883 mac_reg
&= ~(1 << 14);
1884 mac_reg
|= (7 << 15);
1885 ew32(FFLT_DBG
, mac_reg
);
1887 mac_reg
= er32(RCTL
);
1888 mac_reg
|= E1000_RCTL_SECRC
;
1889 ew32(RCTL
, mac_reg
);
1891 ret_val
= e1000e_read_kmrn_reg(hw
,
1892 E1000_KMRNCTRLSTA_CTRL_OFFSET
,
1896 ret_val
= e1000e_write_kmrn_reg(hw
,
1897 E1000_KMRNCTRLSTA_CTRL_OFFSET
,
1901 ret_val
= e1000e_read_kmrn_reg(hw
,
1902 E1000_KMRNCTRLSTA_HD_CTRL
,
1906 data
&= ~(0xF << 8);
1908 ret_val
= e1000e_write_kmrn_reg(hw
,
1909 E1000_KMRNCTRLSTA_HD_CTRL
,
1914 /* Enable jumbo frame workaround in the PHY */
1915 e1e_rphy(hw
, PHY_REG(769, 23), &data
);
1916 data
&= ~(0x7F << 5);
1917 data
|= (0x37 << 5);
1918 ret_val
= e1e_wphy(hw
, PHY_REG(769, 23), data
);
1921 e1e_rphy(hw
, PHY_REG(769, 16), &data
);
1923 ret_val
= e1e_wphy(hw
, PHY_REG(769, 16), data
);
1926 e1e_rphy(hw
, PHY_REG(776, 20), &data
);
1927 data
&= ~(0x3FF << 2);
1928 data
|= (0x1A << 2);
1929 ret_val
= e1e_wphy(hw
, PHY_REG(776, 20), data
);
1932 ret_val
= e1e_wphy(hw
, PHY_REG(776, 23), 0xF100);
1935 e1e_rphy(hw
, HV_PM_CTRL
, &data
);
1936 ret_val
= e1e_wphy(hw
, HV_PM_CTRL
, data
| (1 << 10));
1940 /* Write MAC register values back to h/w defaults */
1941 mac_reg
= er32(FFLT_DBG
);
1942 mac_reg
&= ~(0xF << 14);
1943 ew32(FFLT_DBG
, mac_reg
);
1945 mac_reg
= er32(RCTL
);
1946 mac_reg
&= ~E1000_RCTL_SECRC
;
1947 ew32(RCTL
, mac_reg
);
1949 ret_val
= e1000e_read_kmrn_reg(hw
,
1950 E1000_KMRNCTRLSTA_CTRL_OFFSET
,
1954 ret_val
= e1000e_write_kmrn_reg(hw
,
1955 E1000_KMRNCTRLSTA_CTRL_OFFSET
,
1959 ret_val
= e1000e_read_kmrn_reg(hw
,
1960 E1000_KMRNCTRLSTA_HD_CTRL
,
1964 data
&= ~(0xF << 8);
1966 ret_val
= e1000e_write_kmrn_reg(hw
,
1967 E1000_KMRNCTRLSTA_HD_CTRL
,
1972 /* Write PHY register values back to h/w defaults */
1973 e1e_rphy(hw
, PHY_REG(769, 23), &data
);
1974 data
&= ~(0x7F << 5);
1975 ret_val
= e1e_wphy(hw
, PHY_REG(769, 23), data
);
1978 e1e_rphy(hw
, PHY_REG(769, 16), &data
);
1980 ret_val
= e1e_wphy(hw
, PHY_REG(769, 16), data
);
1983 e1e_rphy(hw
, PHY_REG(776, 20), &data
);
1984 data
&= ~(0x3FF << 2);
1986 ret_val
= e1e_wphy(hw
, PHY_REG(776, 20), data
);
1989 ret_val
= e1e_wphy(hw
, PHY_REG(776, 23), 0x7E00);
1992 e1e_rphy(hw
, HV_PM_CTRL
, &data
);
1993 ret_val
= e1e_wphy(hw
, HV_PM_CTRL
, data
& ~(1 << 10));
1998 /* re-enable Rx path after enabling/disabling workaround */
1999 return e1e_wphy(hw
, PHY_REG(769, 20), phy_reg
& ~(1 << 14));
2003 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2004 * done after every PHY reset.
2006 static s32
e1000_lv_phy_workarounds_ich8lan(struct e1000_hw
*hw
)
2010 if (hw
->mac
.type
!= e1000_pch2lan
)
2013 /* Set MDIO slow mode before any other MDIO access */
2014 ret_val
= e1000_set_mdio_slow_mode_hv(hw
);
2016 ret_val
= hw
->phy
.ops
.acquire(hw
);
2019 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_ADDR
, I82579_MSE_THRESHOLD
);
2022 /* set MSE higher to enable link to stay up when noise is high */
2023 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_DATA
, 0x0034);
2026 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_ADDR
, I82579_MSE_LINK_DOWN
);
2029 /* drop link after 5 times MSE threshold was reached */
2030 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_DATA
, 0x0005);
2032 hw
->phy
.ops
.release(hw
);
2038 * e1000_k1_gig_workaround_lv - K1 Si workaround
2039 * @hw: pointer to the HW structure
2041 * Workaround to set the K1 beacon duration for 82579 parts
2043 static s32
e1000_k1_workaround_lv(struct e1000_hw
*hw
)
2050 if (hw
->mac
.type
!= e1000_pch2lan
)
2053 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
2054 ret_val
= e1e_rphy(hw
, HV_M_STATUS
, &status_reg
);
2058 if ((status_reg
& (HV_M_STATUS_LINK_UP
| HV_M_STATUS_AUTONEG_COMPLETE
))
2059 == (HV_M_STATUS_LINK_UP
| HV_M_STATUS_AUTONEG_COMPLETE
)) {
2060 mac_reg
= er32(FEXTNVM4
);
2061 mac_reg
&= ~E1000_FEXTNVM4_BEACON_DURATION_MASK
;
2063 ret_val
= e1e_rphy(hw
, I82579_LPI_CTRL
, &phy_reg
);
2067 if (status_reg
& HV_M_STATUS_SPEED_1000
) {
2070 mac_reg
|= E1000_FEXTNVM4_BEACON_DURATION_8USEC
;
2071 phy_reg
&= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT
;
2072 /* LV 1G Packet drop issue wa */
2073 ret_val
= e1e_rphy(hw
, HV_PM_CTRL
, &pm_phy_reg
);
2076 pm_phy_reg
&= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA
;
2077 ret_val
= e1e_wphy(hw
, HV_PM_CTRL
, pm_phy_reg
);
2081 mac_reg
|= E1000_FEXTNVM4_BEACON_DURATION_16USEC
;
2082 phy_reg
|= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT
;
2084 ew32(FEXTNVM4
, mac_reg
);
2085 ret_val
= e1e_wphy(hw
, I82579_LPI_CTRL
, phy_reg
);
2092 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2093 * @hw: pointer to the HW structure
2094 * @gate: boolean set to true to gate, false to ungate
2096 * Gate/ungate the automatic PHY configuration via hardware; perform
2097 * the configuration via software instead.
2099 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw
*hw
, bool gate
)
2103 if (hw
->mac
.type
< e1000_pch2lan
)
2106 extcnf_ctrl
= er32(EXTCNF_CTRL
);
2109 extcnf_ctrl
|= E1000_EXTCNF_CTRL_GATE_PHY_CFG
;
2111 extcnf_ctrl
&= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG
;
2113 ew32(EXTCNF_CTRL
, extcnf_ctrl
);
2117 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2118 * @hw: pointer to the HW structure
2120 * Check the appropriate indication the MAC has finished configuring the
2121 * PHY after a software reset.
2123 static void e1000_lan_init_done_ich8lan(struct e1000_hw
*hw
)
2125 u32 data
, loop
= E1000_ICH8_LAN_INIT_TIMEOUT
;
2127 /* Wait for basic configuration completes before proceeding */
2129 data
= er32(STATUS
);
2130 data
&= E1000_STATUS_LAN_INIT_DONE
;
2132 } while ((!data
) && --loop
);
2135 * If basic configuration is incomplete before the above loop
2136 * count reaches 0, loading the configuration from NVM will
2137 * leave the PHY in a bad state possibly resulting in no link.
2140 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2142 /* Clear the Init Done bit for the next init event */
2143 data
= er32(STATUS
);
2144 data
&= ~E1000_STATUS_LAN_INIT_DONE
;
2149 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2150 * @hw: pointer to the HW structure
2152 static s32
e1000_post_phy_reset_ich8lan(struct e1000_hw
*hw
)
2157 if (hw
->phy
.ops
.check_reset_block(hw
))
2160 /* Allow time for h/w to get to quiescent state after reset */
2161 usleep_range(10000, 20000);
2163 /* Perform any necessary post-reset workarounds */
2164 switch (hw
->mac
.type
) {
2166 ret_val
= e1000_hv_phy_workarounds_ich8lan(hw
);
2171 ret_val
= e1000_lv_phy_workarounds_ich8lan(hw
);
2179 /* Clear the host wakeup bit after lcd reset */
2180 if (hw
->mac
.type
>= e1000_pchlan
) {
2181 e1e_rphy(hw
, BM_PORT_GEN_CFG
, ®
);
2182 reg
&= ~BM_WUC_HOST_WU_BIT
;
2183 e1e_wphy(hw
, BM_PORT_GEN_CFG
, reg
);
2186 /* Configure the LCD with the extended configuration region in NVM */
2187 ret_val
= e1000_sw_lcd_config_ich8lan(hw
);
2191 /* Configure the LCD with the OEM bits in NVM */
2192 ret_val
= e1000_oem_bits_config_ich8lan(hw
, true);
2194 if (hw
->mac
.type
== e1000_pch2lan
) {
2195 /* Ungate automatic PHY configuration on non-managed 82579 */
2196 if (!(er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
)) {
2197 usleep_range(10000, 20000);
2198 e1000_gate_hw_phy_config_ich8lan(hw
, false);
2201 /* Set EEE LPI Update Timer to 200usec */
2202 ret_val
= hw
->phy
.ops
.acquire(hw
);
2205 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_ADDR
,
2206 I82579_LPI_UPDATE_TIMER
);
2208 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_DATA
, 0x1387);
2209 hw
->phy
.ops
.release(hw
);
2216 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2217 * @hw: pointer to the HW structure
2220 * This is a function pointer entry point called by drivers
2221 * or other shared routines.
2223 static s32
e1000_phy_hw_reset_ich8lan(struct e1000_hw
*hw
)
2227 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2228 if ((hw
->mac
.type
== e1000_pch2lan
) &&
2229 !(er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
))
2230 e1000_gate_hw_phy_config_ich8lan(hw
, true);
2232 ret_val
= e1000e_phy_hw_reset_generic(hw
);
2236 return e1000_post_phy_reset_ich8lan(hw
);
/**
 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU, false to disable
 *
 * Sets the LPLU state according to the active flag. For PCH, if the OEM write
 * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
 * the phy speed. This function will manually set the LPLU bit and restart
 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
 * since it configures the same bit.
 **/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
	s32 ret_val;
	u16 oem_reg;

	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		return ret_val;

	if (active)
		oem_reg |= HV_OEM_BITS_LPLU;
	else
		oem_reg &= ~HV_OEM_BITS_LPLU;

	if (!hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
}
2271 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2272 * @hw: pointer to the HW structure
2273 * @active: true to enable LPLU, false to disable
2275 * Sets the LPLU D0 state according to the active flag. When
2276 * activating LPLU this function also disables smart speed
2277 * and vice versa. LPLU will not be activated unless the
2278 * device autonegotiation advertisement meets standards of
2279 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2280 * This is a function pointer entry point only called by
2281 * PHY setup routines.
2283 static s32
e1000_set_d0_lplu_state_ich8lan(struct e1000_hw
*hw
, bool active
)
2285 struct e1000_phy_info
*phy
= &hw
->phy
;
2290 if (phy
->type
== e1000_phy_ife
)
2293 phy_ctrl
= er32(PHY_CTRL
);
2296 phy_ctrl
|= E1000_PHY_CTRL_D0A_LPLU
;
2297 ew32(PHY_CTRL
, phy_ctrl
);
2299 if (phy
->type
!= e1000_phy_igp_3
)
2303 * Call gig speed drop workaround on LPLU before accessing
2306 if (hw
->mac
.type
== e1000_ich8lan
)
2307 e1000e_gig_downshift_workaround_ich8lan(hw
);
2309 /* When LPLU is enabled, we should disable SmartSpeed */
2310 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
, &data
);
2311 data
&= ~IGP01E1000_PSCFR_SMART_SPEED
;
2312 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
, data
);
2316 phy_ctrl
&= ~E1000_PHY_CTRL_D0A_LPLU
;
2317 ew32(PHY_CTRL
, phy_ctrl
);
2319 if (phy
->type
!= e1000_phy_igp_3
)
2323 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2324 * during Dx states where the power conservation is most
2325 * important. During driver activity we should enable
2326 * SmartSpeed, so performance is maintained.
2328 if (phy
->smart_speed
== e1000_smart_speed_on
) {
2329 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2334 data
|= IGP01E1000_PSCFR_SMART_SPEED
;
2335 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2339 } else if (phy
->smart_speed
== e1000_smart_speed_off
) {
2340 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2345 data
&= ~IGP01E1000_PSCFR_SMART_SPEED
;
2346 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2357 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2358 * @hw: pointer to the HW structure
2359 * @active: true to enable LPLU, false to disable
2361 * Sets the LPLU D3 state according to the active flag. When
2362 * activating LPLU this function also disables smart speed
2363 * and vice versa. LPLU will not be activated unless the
2364 * device autonegotiation advertisement meets standards of
2365 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2366 * This is a function pointer entry point only called by
2367 * PHY setup routines.
2369 static s32
e1000_set_d3_lplu_state_ich8lan(struct e1000_hw
*hw
, bool active
)
2371 struct e1000_phy_info
*phy
= &hw
->phy
;
2376 phy_ctrl
= er32(PHY_CTRL
);
2379 phy_ctrl
&= ~E1000_PHY_CTRL_NOND0A_LPLU
;
2380 ew32(PHY_CTRL
, phy_ctrl
);
2382 if (phy
->type
!= e1000_phy_igp_3
)
2386 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2387 * during Dx states where the power conservation is most
2388 * important. During driver activity we should enable
2389 * SmartSpeed, so performance is maintained.
2391 if (phy
->smart_speed
== e1000_smart_speed_on
) {
2392 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2397 data
|= IGP01E1000_PSCFR_SMART_SPEED
;
2398 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2402 } else if (phy
->smart_speed
== e1000_smart_speed_off
) {
2403 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2408 data
&= ~IGP01E1000_PSCFR_SMART_SPEED
;
2409 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
,
2414 } else if ((phy
->autoneg_advertised
== E1000_ALL_SPEED_DUPLEX
) ||
2415 (phy
->autoneg_advertised
== E1000_ALL_NOT_GIG
) ||
2416 (phy
->autoneg_advertised
== E1000_ALL_10_SPEED
)) {
2417 phy_ctrl
|= E1000_PHY_CTRL_NOND0A_LPLU
;
2418 ew32(PHY_CTRL
, phy_ctrl
);
2420 if (phy
->type
!= e1000_phy_igp_3
)
2424 * Call gig speed drop workaround on LPLU before accessing
2427 if (hw
->mac
.type
== e1000_ich8lan
)
2428 e1000e_gig_downshift_workaround_ich8lan(hw
);
2430 /* When LPLU is enabled, we should disable SmartSpeed */
2431 ret_val
= e1e_rphy(hw
, IGP01E1000_PHY_PORT_CONFIG
, &data
);
2435 data
&= ~IGP01E1000_PSCFR_SMART_SPEED
;
2436 ret_val
= e1e_wphy(hw
, IGP01E1000_PHY_PORT_CONFIG
, data
);
/**
 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 * @hw: pointer to the HW structure
 * @bank: pointer to the variable that returns the active bank
 *
 * Reads signature byte from the NVM using the flash access registers.
 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
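
/*
 * Worked example (assuming a 4 KB, i.e. 2048-word, flash bank): the
 * signature word 0x13 of bank 0 has its high byte at byte offset
 * 0x13 * 2 + 1 = 0x27, and the same byte in bank 1 sits at
 * 0x27 + 2048 * sizeof(u16) = 0x1027.  A bank is considered valid when
 * bits 7:6 of that byte (bits 15:14 of the word) read back as 10b.
 */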
/**
 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to read.
 * @words: Size of data to read in words
 * @data: Pointer to the word(s) to read at offset.
 *
 * Reads a word(s) from the NVM using the flash access registers.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
	s32 ret_val = 0;
	u32 bank = 0;
	u16 i, word;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	nvm->ops.acquire(hw);

	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	act_offset = (bank) ? nvm->flash_bank_size : 0;
	act_offset += offset;

	ret_val = 0;
	for (i = 0; i < words; i++) {
		if (dev_spec->shadow_ram[offset+i].modified) {
			data[i] = dev_spec->shadow_ram[offset+i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

	nvm->ops.release(hw);

out:
	if (ret_val)
		e_dbg("NVM read error: %d\n", ret_val);

	return ret_val;
}
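
/*
 * Note that reads are served from the driver's shadow RAM for any word that
 * has been modified (via e1000_write_nvm_ich8lan()) but not yet committed to
 * flash; all other words come from the currently valid flash bank, offset by
 * flash_bank_size words when bank 1 is active.
 */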
/**
 * e1000_flash_cycle_init_ich8lan - Initialize flash
 * @hw: pointer to the HW structure
 *
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;
	s32 i = 0;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/*
	 * Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */
	if (!hsfsts.hsf_status.flcinprog) {
		/*
		 * There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		/*
		 * Otherwise poll for some time so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (!ret_val) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
/**
 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 * @hw: pointer to the HW structure
 * @timeout: maximum time to wait for completion
 *
 * This function starts a flash cycle and waits for its completion.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		udelay(1);
	} while (i++ < timeout);

	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return 0;

	return -E1000_ERR_NVM;
}
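
/*
 * In outline, every software-sequenced flash access in this file follows the
 * same recipe: e1000_flash_cycle_init_ich8lan() to make sure no cycle is in
 * flight, program HSFCTL with the byte count (FLDBCOUNT) and cycle type
 * (FLCYCLE = read/write/erase), write the linear address to FADDR, move data
 * through FDATA0 for writes, then set FLCGO and poll FLCDONE/FCERR here until
 * the cycle completes or times out.
 */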
/**
 * e1000_read_flash_word_ich8lan - Read word from flash
 * @hw: pointer to the HW structure
 * @offset: offset to data location
 * @data: pointer to the location for storing the data
 *
 * Reads the flash word at offset into data. Offset is converted
 * to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data)
{
	/* Must convert offset into bytes. */
	offset <<= 1;

	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
}

/**
 * e1000_read_flash_byte_ich8lan - Read byte from flash
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to read.
 * @data: Pointer to a byte to store the value read.
 *
 * Reads a single byte from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data)
{
	s32 ret_val;
	u16 word = 0;

	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	*data = (u8)word;

	return 0;
}
/**
 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the byte or word to read.
 * @size: Size of data to read, 1=byte 2=word
 * @data: Pointer to the word to store the value read.
 *
 * Reads a byte or word from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			    hw->nvm.flash_base_addr;

	do {
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/*
		 * Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first
		 */
		if (!ret_val) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
/**
 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
 * @hw: pointer to the HW structure
 * @offset: The offset (in bytes) of the word(s) to write.
 * @words: Size of data to write in words
 * @data: Pointer to the word(s) to write at offset.
 *
 * Writes a byte or word to the NVM using the flash access registers.
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 i;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	nvm->ops.acquire(hw);

	for (i = 0; i < words; i++) {
		dev_spec->shadow_ram[offset+i].modified = true;
		dev_spec->shadow_ram[offset+i].value = data[i];
	}

	nvm->ops.release(hw);

	return 0;
}
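
/*
 * Note that this only marks the words dirty in the shadow RAM; nothing is
 * written to the flash itself until e1000_update_nvm_checksum_ich8lan()
 * below commits the pending changes to the inactive bank and flips the
 * bank signature.
 */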
2825 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2826 * @hw: pointer to the HW structure
2828 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2829 * which writes the checksum to the shadow ram. The changes in the shadow
2830 * ram are then committed to the EEPROM by processing each bank at a time
2831 * checking for the modified bit and writing only the pending changes.
2832 * After a successful commit, the shadow ram is cleared and is ready for
2835 static s32
e1000_update_nvm_checksum_ich8lan(struct e1000_hw
*hw
)
2837 struct e1000_nvm_info
*nvm
= &hw
->nvm
;
2838 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
2839 u32 i
, act_offset
, new_bank_offset
, old_bank_offset
, bank
;
2843 ret_val
= e1000e_update_nvm_checksum_generic(hw
);
2847 if (nvm
->type
!= e1000_nvm_flash_sw
)
2850 nvm
->ops
.acquire(hw
);
2853 * We're writing to the opposite bank so if we're on bank 1,
2854 * write to bank 0 etc. We also need to erase the segment that
2855 * is going to be written
2857 ret_val
= e1000_valid_nvm_bank_detect_ich8lan(hw
, &bank
);
2859 e_dbg("Could not detect valid bank, assuming bank 0\n");
2864 new_bank_offset
= nvm
->flash_bank_size
;
2865 old_bank_offset
= 0;
2866 ret_val
= e1000_erase_flash_bank_ich8lan(hw
, 1);
2870 old_bank_offset
= nvm
->flash_bank_size
;
2871 new_bank_offset
= 0;
2872 ret_val
= e1000_erase_flash_bank_ich8lan(hw
, 0);
2877 for (i
= 0; i
< E1000_ICH8_SHADOW_RAM_WORDS
; i
++) {
2879 * Determine whether to write the value stored
2880 * in the other NVM bank or a modified value stored
2883 if (dev_spec
->shadow_ram
[i
].modified
) {
2884 data
= dev_spec
->shadow_ram
[i
].value
;
2886 ret_val
= e1000_read_flash_word_ich8lan(hw
, i
+
2894 * If the word is 0x13, then make sure the signature bits
2895 * (15:14) are 11b until the commit has completed.
2896 * This will allow us to write 10b which indicates the
2897 * signature is valid. We want to do this after the write
2898 * has completed so that we don't mark the segment valid
2899 * while the write is still in progress
2901 if (i
== E1000_ICH_NVM_SIG_WORD
)
2902 data
|= E1000_ICH_NVM_SIG_MASK
;
2904 /* Convert offset to bytes. */
2905 act_offset
= (i
+ new_bank_offset
) << 1;
2908 /* Write the bytes to the new bank. */
2909 ret_val
= e1000_retry_write_flash_byte_ich8lan(hw
,
2916 ret_val
= e1000_retry_write_flash_byte_ich8lan(hw
,
2924 * Don't bother writing the segment valid bits if sector
2925 * programming failed.
2928 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2929 e_dbg("Flash commit failed.\n");
2934 * Finally validate the new segment by setting bit 15:14
2935 * to 10b in word 0x13 , this can be done without an
2936 * erase as well since these bits are 11 to start with
2937 * and we need to change bit 14 to 0b
2939 act_offset
= new_bank_offset
+ E1000_ICH_NVM_SIG_WORD
;
2940 ret_val
= e1000_read_flash_word_ich8lan(hw
, act_offset
, &data
);
2945 ret_val
= e1000_retry_write_flash_byte_ich8lan(hw
,
2952 * And invalidate the previously valid segment by setting
2953 * its signature word (0x13) high_byte to 0b. This can be
2954 * done without an erase because flash erase sets all bits
2955 * to 1's. We can write 1's to 0's without an erase
2957 act_offset
= (old_bank_offset
+ E1000_ICH_NVM_SIG_WORD
) * 2 + 1;
2958 ret_val
= e1000_retry_write_flash_byte_ich8lan(hw
, act_offset
, 0);
2962 /* Great! Everything worked, we can now clear the cached entries. */
2963 for (i
= 0; i
< E1000_ICH8_SHADOW_RAM_WORDS
; i
++) {
2964 dev_spec
->shadow_ram
[i
].modified
= false;
2965 dev_spec
->shadow_ram
[i
].value
= 0xFFFF;
2969 nvm
->ops
.release(hw
);
2972 * Reload the EEPROM, or else modifications will not appear
2973 * until after the next adapter reset.
2976 nvm
->ops
.reload(hw
);
2977 usleep_range(10000, 20000);
2982 e_dbg("NVM update error: %d\n", ret_val
);
/**
 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
 * If the bit is 0, the EEPROM had been modified, but the checksum was not
 * calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;

	/*
	 * Read 0x19 and check bit 6. If this bit is 0, the checksum
	 * needs to be fixed. This bit is an indication that the NVM
	 * was prepared by OEM software and did not calculate the
	 * checksum...a likely scenario.
	 */
	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
	if (ret_val)
		return ret_val;

	if (!(data & 0x40)) {
		data |= 0x40;
		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_update_nvm_checksum(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000e_validate_nvm_checksum_generic(hw);
}
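
/*
 * For example (hypothetical value), a part shipped with word 0x19 reading
 * 0xFFBF (bit 6 clear) gets bit 6 set and its checksum word recomputed on
 * the first driver load via the path above; subsequent loads then fall
 * straight through to the generic checksum validation.
 */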
/**
 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 * @hw: pointer to the HW structure
 *
 * To prevent malicious write/erase of the NVM, set it to be read-only
 * so that the hardware ignores all write/erase cycles of the NVM via
 * the flash control registers. The shadow-ram copy of the NVM will
 * still be updated, however any updates to this copy will not stick
 * across driver reloads.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

	nvm->ops.acquire(hw);

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

	/*
	 * Lock down a subset of GbE Flash Control Registers, e.g.
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	nvm->ops.release(hw);
}
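
/*
 * The protected range is taken straight from GFPREG: the low 13 bits give
 * the first sector of the GbE region and bits 28:16 the last one (both in
 * flash-sector units, 4 KB on these parts), so PR0 ends up covering exactly
 * the region the NVM words live in.
 */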
3065 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3066 * @hw: pointer to the HW structure
3067 * @offset: The offset (in bytes) of the byte/word to read.
3068 * @size: Size of data to read, 1=byte 2=word
3069 * @data: The byte(s) to write to the NVM.
3071 * Writes one/two bytes to the NVM using the flash access registers.
3073 static s32
e1000_write_flash_data_ich8lan(struct e1000_hw
*hw
, u32 offset
,
3076 union ich8_hws_flash_status hsfsts
;
3077 union ich8_hws_flash_ctrl hsflctl
;
3078 u32 flash_linear_addr
;
3083 if (size
< 1 || size
> 2 || data
> size
* 0xff ||
3084 offset
> ICH_FLASH_LINEAR_ADDR_MASK
)
3085 return -E1000_ERR_NVM
;
3087 flash_linear_addr
= (ICH_FLASH_LINEAR_ADDR_MASK
& offset
) +
3088 hw
->nvm
.flash_base_addr
;
3093 ret_val
= e1000_flash_cycle_init_ich8lan(hw
);
3097 hsflctl
.regval
= er16flash(ICH_FLASH_HSFCTL
);
3098 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3099 hsflctl
.hsf_ctrl
.fldbcount
= size
-1;
3100 hsflctl
.hsf_ctrl
.flcycle
= ICH_CYCLE_WRITE
;
3101 ew16flash(ICH_FLASH_HSFCTL
, hsflctl
.regval
);
3103 ew32flash(ICH_FLASH_FADDR
, flash_linear_addr
);
3106 flash_data
= (u32
)data
& 0x00FF;
3108 flash_data
= (u32
)data
;
3110 ew32flash(ICH_FLASH_FDATA0
, flash_data
);
3113 * check if FCERR is set to 1 , if set to 1, clear it
3114 * and try the whole sequence a few more times else done
3116 ret_val
= e1000_flash_cycle_ich8lan(hw
,
3117 ICH_FLASH_WRITE_COMMAND_TIMEOUT
);
3122 * If we're here, then things are most likely
3123 * completely hosed, but if the error condition
3124 * is detected, it won't hurt to give it another
3125 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3127 hsfsts
.regval
= er16flash(ICH_FLASH_HSFSTS
);
3128 if (hsfsts
.hsf_status
.flcerr
)
3129 /* Repeat for some time before giving up. */
3131 if (!hsfsts
.hsf_status
.flcdone
) {
3132 e_dbg("Timeout error - flash cycle did not complete.\n");
3135 } while (count
++ < ICH_FLASH_CYCLE_REPEAT_COUNT
);
/**
 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The index of the byte to write.
 * @data: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	u16 word = (u16)data;

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

/**
 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The offset of the byte to write.
 * @byte: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 * Goes through a retry algorithm before giving up.
 **/
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte)
{
	s32 ret_val;
	u16 program_retries;

	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
	if (!ret_val)
		return ret_val;

	for (program_retries = 0; program_retries < 100; program_retries++) {
		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
		if (!ret_val)
			break;
	}
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return 0;
}
3189 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3190 * @hw: pointer to the HW structure
3191 * @bank: 0 for first bank, 1 for second bank, etc.
3193 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3194 * bank N is 4096 * N + flash_reg_addr.
3196 static s32
e1000_erase_flash_bank_ich8lan(struct e1000_hw
*hw
, u32 bank
)
3198 struct e1000_nvm_info
*nvm
= &hw
->nvm
;
3199 union ich8_hws_flash_status hsfsts
;
3200 union ich8_hws_flash_ctrl hsflctl
;
3201 u32 flash_linear_addr
;
3202 /* bank size is in 16bit words - adjust to bytes */
3203 u32 flash_bank_size
= nvm
->flash_bank_size
* 2;
3206 s32 j
, iteration
, sector_size
;
3208 hsfsts
.regval
= er16flash(ICH_FLASH_HSFSTS
);
3211 * Determine HW Sector size: Read BERASE bits of hw flash status
3213 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3214 * consecutive sectors. The start index for the nth Hw sector
3215 * can be calculated as = bank * 4096 + n * 256
3216 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3217 * The start index for the nth Hw sector can be calculated
3219 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3220 * (ich9 only, otherwise error condition)
3221 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3223 switch (hsfsts
.hsf_status
.berasesz
) {
3225 /* Hw sector size 256 */
3226 sector_size
= ICH_FLASH_SEG_SIZE_256
;
3227 iteration
= flash_bank_size
/ ICH_FLASH_SEG_SIZE_256
;
3230 sector_size
= ICH_FLASH_SEG_SIZE_4K
;
3234 sector_size
= ICH_FLASH_SEG_SIZE_8K
;
3238 sector_size
= ICH_FLASH_SEG_SIZE_64K
;
3242 return -E1000_ERR_NVM
;
3245 /* Start with the base address, then add the sector offset. */
3246 flash_linear_addr
= hw
->nvm
.flash_base_addr
;
3247 flash_linear_addr
+= (bank
) ? flash_bank_size
: 0;
3249 for (j
= 0; j
< iteration
; j
++) {
3252 ret_val
= e1000_flash_cycle_init_ich8lan(hw
);
3257 * Write a value 11 (block Erase) in Flash
3258 * Cycle field in hw flash control
3260 hsflctl
.regval
= er16flash(ICH_FLASH_HSFCTL
);
3261 hsflctl
.hsf_ctrl
.flcycle
= ICH_CYCLE_ERASE
;
3262 ew16flash(ICH_FLASH_HSFCTL
, hsflctl
.regval
);
3265 * Write the last 24 bits of an index within the
3266 * block into Flash Linear address field in Flash
3269 flash_linear_addr
+= (j
* sector_size
);
3270 ew32flash(ICH_FLASH_FADDR
, flash_linear_addr
);
3272 ret_val
= e1000_flash_cycle_ich8lan(hw
,
3273 ICH_FLASH_ERASE_COMMAND_TIMEOUT
);
3278 * Check if FCERR is set to 1. If 1,
3279 * clear it and try the whole sequence
3280 * a few more times else Done
3282 hsfsts
.regval
= er16flash(ICH_FLASH_HSFSTS
);
3283 if (hsfsts
.hsf_status
.flcerr
)
3284 /* repeat for some time before giving up */
3286 else if (!hsfsts
.hsf_status
.flcdone
)
3288 } while (++count
< ICH_FLASH_CYCLE_REPEAT_COUNT
);
/**
 * e1000_valid_led_default_ich8lan - Set the default LED settings
 * @hw: pointer to the HW structure
 * @data: Pointer to the LED settings
 *
 * Reads the LED default settings from the NVM to data. If the NVM LED
 * settings are all 0's or F's, set the LED default to a valid LED default
 * setting.
 **/
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 ||
	    *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT_ICH8LAN;

	return 0;
}
3321 * e1000_id_led_init_pchlan - store LED configurations
3322 * @hw: pointer to the HW structure
3324 * PCH does not control LEDs via the LEDCTL register, rather it uses
3325 * the PHY LED configuration register.
3327 * PCH also does not have an "always on" or "always off" mode which
3328 * complicates the ID feature. Instead of using the "on" mode to indicate
3329 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
3330 * use "link_up" mode. The LEDs will still ID on request if there is no
3331 * link based on logic in e1000_led_[on|off]_pchlan().
3333 static s32
e1000_id_led_init_pchlan(struct e1000_hw
*hw
)
3335 struct e1000_mac_info
*mac
= &hw
->mac
;
3337 const u32 ledctl_on
= E1000_LEDCTL_MODE_LINK_UP
;
3338 const u32 ledctl_off
= E1000_LEDCTL_MODE_LINK_UP
| E1000_PHY_LED0_IVRT
;
3339 u16 data
, i
, temp
, shift
;
3341 /* Get default ID LED modes */
3342 ret_val
= hw
->nvm
.ops
.valid_led_default(hw
, &data
);
3346 mac
->ledctl_default
= er32(LEDCTL
);
3347 mac
->ledctl_mode1
= mac
->ledctl_default
;
3348 mac
->ledctl_mode2
= mac
->ledctl_default
;
3350 for (i
= 0; i
< 4; i
++) {
3351 temp
= (data
>> (i
<< 2)) & E1000_LEDCTL_LED0_MODE_MASK
;
3354 case ID_LED_ON1_DEF2
:
3355 case ID_LED_ON1_ON2
:
3356 case ID_LED_ON1_OFF2
:
3357 mac
->ledctl_mode1
&= ~(E1000_PHY_LED0_MASK
<< shift
);
3358 mac
->ledctl_mode1
|= (ledctl_on
<< shift
);
3360 case ID_LED_OFF1_DEF2
:
3361 case ID_LED_OFF1_ON2
:
3362 case ID_LED_OFF1_OFF2
:
3363 mac
->ledctl_mode1
&= ~(E1000_PHY_LED0_MASK
<< shift
);
3364 mac
->ledctl_mode1
|= (ledctl_off
<< shift
);
3371 case ID_LED_DEF1_ON2
:
3372 case ID_LED_ON1_ON2
:
3373 case ID_LED_OFF1_ON2
:
3374 mac
->ledctl_mode2
&= ~(E1000_PHY_LED0_MASK
<< shift
);
3375 mac
->ledctl_mode2
|= (ledctl_on
<< shift
);
3377 case ID_LED_DEF1_OFF2
:
3378 case ID_LED_ON1_OFF2
:
3379 case ID_LED_OFF1_OFF2
:
3380 mac
->ledctl_mode2
&= ~(E1000_PHY_LED0_MASK
<< shift
);
3381 mac
->ledctl_mode2
|= (ledctl_off
<< shift
);
/**
 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 * @hw: pointer to the HW structure
 *
 * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
 * register, so the bus width is hard coded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	ret_val = e1000e_get_bus_info_pcie(hw);

	/*
	 * ICH devices are "PCI Express"-ish. They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}
3419 * e1000_reset_hw_ich8lan - Reset the hardware
3420 * @hw: pointer to the HW structure
3422 * Does a full reset of the hardware which includes a reset of the PHY and
3425 static s32
e1000_reset_hw_ich8lan(struct e1000_hw
*hw
)
3427 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
3433 * Prevent the PCI-E bus from sticking if there is no TLP connection
3434 * on the last TLP read/write transaction when MAC is reset.
3436 ret_val
= e1000e_disable_pcie_master(hw
);
3438 e_dbg("PCI-E Master disable polling has failed.\n");
3440 e_dbg("Masking off all interrupts\n");
3441 ew32(IMC
, 0xffffffff);
3444 * Disable the Transmit and Receive units. Then delay to allow
3445 * any pending transactions to complete before we hit the MAC
3446 * with the global reset.
3449 ew32(TCTL
, E1000_TCTL_PSP
);
3452 usleep_range(10000, 20000);
3454 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3455 if (hw
->mac
.type
== e1000_ich8lan
) {
3456 /* Set Tx and Rx buffer allocation to 8k apiece. */
3457 ew32(PBA
, E1000_PBA_8K
);
3458 /* Set Packet Buffer Size to 16k. */
3459 ew32(PBS
, E1000_PBS_16K
);
3462 if (hw
->mac
.type
== e1000_pchlan
) {
3463 /* Save the NVM K1 bit setting */
3464 ret_val
= e1000_read_nvm(hw
, E1000_NVM_K1_CONFIG
, 1, &kum_cfg
);
3468 if (kum_cfg
& E1000_NVM_K1_ENABLE
)
3469 dev_spec
->nvm_k1_enabled
= true;
3471 dev_spec
->nvm_k1_enabled
= false;
3476 if (!hw
->phy
.ops
.check_reset_block(hw
)) {
3478 * Full-chip reset requires MAC and PHY reset at the same
3479 * time to make sure the interface between MAC and the
3480 * external PHY is reset.
3482 ctrl
|= E1000_CTRL_PHY_RST
;
3485 * Gate automatic PHY configuration by hardware on
3488 if ((hw
->mac
.type
== e1000_pch2lan
) &&
3489 !(er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
))
3490 e1000_gate_hw_phy_config_ich8lan(hw
, true);
3492 ret_val
= e1000_acquire_swflag_ich8lan(hw
);
3493 e_dbg("Issuing a global reset to ich8lan\n");
3494 ew32(CTRL
, (ctrl
| E1000_CTRL_RST
));
3495 /* cannot issue a flush here because it hangs the hardware */
3498 /* Set Phy Config Counter to 50msec */
3499 if (hw
->mac
.type
== e1000_pch2lan
) {
3500 reg
= er32(FEXTNVM3
);
3501 reg
&= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK
;
3502 reg
|= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC
;
3503 ew32(FEXTNVM3
, reg
);
3507 clear_bit(__E1000_ACCESS_SHARED_RESOURCE
, &hw
->adapter
->state
);
3509 if (ctrl
& E1000_CTRL_PHY_RST
) {
3510 ret_val
= hw
->phy
.ops
.get_cfg_done(hw
);
3514 ret_val
= e1000_post_phy_reset_ich8lan(hw
);
3520 * For PCH, this write will make sure that any noise
3521 * will be detected as a CRC error and be dropped rather than show up
3522 * as a bad packet to the DMA engine.
3524 if (hw
->mac
.type
== e1000_pchlan
)
3525 ew32(CRC_OFFSET
, 0x65656565);
3527 ew32(IMC
, 0xffffffff);
3530 reg
= er32(KABGTXD
);
3531 reg
|= E1000_KABGTXD_BGSQLBIAS
;
3538 * e1000_init_hw_ich8lan - Initialize the hardware
3539 * @hw: pointer to the HW structure
3541 * Prepares the hardware for transmit and receive by doing the following:
3542 * - initialize hardware bits
3543 * - initialize LED identification
3544 * - setup receive address registers
3545 * - setup flow control
3546 * - setup transmit descriptors
3547 * - clear statistics
3549 static s32
e1000_init_hw_ich8lan(struct e1000_hw
*hw
)
3551 struct e1000_mac_info
*mac
= &hw
->mac
;
3552 u32 ctrl_ext
, txdctl
, snoop
;
3556 e1000_initialize_hw_bits_ich8lan(hw
);
3558 /* Initialize identification LED */
3559 ret_val
= mac
->ops
.id_led_init(hw
);
3561 e_dbg("Error initializing identification LED\n");
3562 /* This is not fatal and we should not stop init due to this */
3564 /* Setup the receive address. */
3565 e1000e_init_rx_addrs(hw
, mac
->rar_entry_count
);
3567 /* Zero out the Multicast HASH table */
3568 e_dbg("Zeroing the MTA\n");
3569 for (i
= 0; i
< mac
->mta_reg_count
; i
++)
3570 E1000_WRITE_REG_ARRAY(hw
, E1000_MTA
, i
, 0);
3573 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3574 * the ME. Disable wakeup by clearing the host wakeup bit.
3575 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3577 if (hw
->phy
.type
== e1000_phy_82578
) {
3578 e1e_rphy(hw
, BM_PORT_GEN_CFG
, &i
);
3579 i
&= ~BM_WUC_HOST_WU_BIT
;
3580 e1e_wphy(hw
, BM_PORT_GEN_CFG
, i
);
3581 ret_val
= e1000_phy_hw_reset_ich8lan(hw
);
3586 /* Setup link and flow control */
3587 ret_val
= mac
->ops
.setup_link(hw
);
3589 /* Set the transmit descriptor write-back policy for both queues */
3590 txdctl
= er32(TXDCTL(0));
3591 txdctl
= (txdctl
& ~E1000_TXDCTL_WTHRESH
) |
3592 E1000_TXDCTL_FULL_TX_DESC_WB
;
3593 txdctl
= (txdctl
& ~E1000_TXDCTL_PTHRESH
) |
3594 E1000_TXDCTL_MAX_TX_DESC_PREFETCH
;
3595 ew32(TXDCTL(0), txdctl
);
3596 txdctl
= er32(TXDCTL(1));
3597 txdctl
= (txdctl
& ~E1000_TXDCTL_WTHRESH
) |
3598 E1000_TXDCTL_FULL_TX_DESC_WB
;
3599 txdctl
= (txdctl
& ~E1000_TXDCTL_PTHRESH
) |
3600 E1000_TXDCTL_MAX_TX_DESC_PREFETCH
;
3601 ew32(TXDCTL(1), txdctl
);
3604 * ICH8 has opposite polarity of no_snoop bits.
3605 * By default, we should use snoop behavior.
3607 if (mac
->type
== e1000_ich8lan
)
3608 snoop
= PCIE_ICH8_SNOOP_ALL
;
3610 snoop
= (u32
) ~(PCIE_NO_SNOOP_ALL
);
3611 e1000e_set_pcie_no_snoop(hw
, snoop
);
3613 ctrl_ext
= er32(CTRL_EXT
);
3614 ctrl_ext
|= E1000_CTRL_EXT_RO_DIS
;
3615 ew32(CTRL_EXT
, ctrl_ext
);
3618 * Clear all of the statistics registers (clear on read). It is
3619 * important that we do this after we have tried to establish link
3620 * because the symbol error count will increment wildly if there
3623 e1000_clear_hw_cntrs_ich8lan(hw
);
3628 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3629 * @hw: pointer to the HW structure
3631 * Sets/Clears required hardware bits necessary for correctly setting up the
3632 * hardware for transmit and receive.
3634 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw
*hw
)
3638 /* Extended Device Control */
3639 reg
= er32(CTRL_EXT
);
3641 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3642 if (hw
->mac
.type
>= e1000_pchlan
)
3643 reg
|= E1000_CTRL_EXT_PHYPDEN
;
3644 ew32(CTRL_EXT
, reg
);
3646 /* Transmit Descriptor Control 0 */
3647 reg
= er32(TXDCTL(0));
3649 ew32(TXDCTL(0), reg
);
3651 /* Transmit Descriptor Control 1 */
3652 reg
= er32(TXDCTL(1));
3654 ew32(TXDCTL(1), reg
);
3656 /* Transmit Arbitration Control 0 */
3657 reg
= er32(TARC(0));
3658 if (hw
->mac
.type
== e1000_ich8lan
)
3659 reg
|= (1 << 28) | (1 << 29);
3660 reg
|= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3663 /* Transmit Arbitration Control 1 */
3664 reg
= er32(TARC(1));
3665 if (er32(TCTL
) & E1000_TCTL_MULR
)
3669 reg
|= (1 << 24) | (1 << 26) | (1 << 30);
3673 if (hw
->mac
.type
== e1000_ich8lan
) {
3680 * work-around descriptor data corruption issue during nfs v2 udp
3681 * traffic, just disable the nfs filtering capability
3684 reg
|= (E1000_RFCTL_NFSW_DIS
| E1000_RFCTL_NFSR_DIS
);
3687 * Disable IPv6 extension header parsing because some malformed
3688 * IPv6 headers can hang the Rx.
3690 if (hw
->mac
.type
== e1000_ich8lan
)
3691 reg
|= (E1000_RFCTL_IPV6_EX_DIS
| E1000_RFCTL_NEW_IPV6_EXT_DIS
);
3696 * e1000_setup_link_ich8lan - Setup flow control and link settings
3697 * @hw: pointer to the HW structure
3699 * Determines which flow control settings to use, then configures flow
3700 * control. Calls the appropriate media-specific link configuration
3701 * function. Assuming the adapter has a valid link partner, a valid link
3702 * should be established. Assumes the hardware has previously been reset
3703 * and the transmitter and receiver are not enabled.
3705 static s32
e1000_setup_link_ich8lan(struct e1000_hw
*hw
)
3709 if (hw
->phy
.ops
.check_reset_block(hw
))
3713 * ICH parts do not have a word in the NVM to determine
3714 * the default flow control setting, so we explicitly
3717 if (hw
->fc
.requested_mode
== e1000_fc_default
) {
3718 /* Workaround h/w hang when Tx flow control enabled */
3719 if (hw
->mac
.type
== e1000_pchlan
)
3720 hw
->fc
.requested_mode
= e1000_fc_rx_pause
;
3722 hw
->fc
.requested_mode
= e1000_fc_full
;
3726 * Save off the requested flow control mode for use later. Depending
3727 * on the link partner's capabilities, we may or may not use this mode.
3729 hw
->fc
.current_mode
= hw
->fc
.requested_mode
;
3731 e_dbg("After fix-ups FlowControl is now = %x\n",
3732 hw
->fc
.current_mode
);
3734 /* Continue to configure the copper link. */
3735 ret_val
= hw
->mac
.ops
.setup_physical_interface(hw
);
3739 ew32(FCTTV
, hw
->fc
.pause_time
);
3740 if ((hw
->phy
.type
== e1000_phy_82578
) ||
3741 (hw
->phy
.type
== e1000_phy_82579
) ||
3742 (hw
->phy
.type
== e1000_phy_i217
) ||
3743 (hw
->phy
.type
== e1000_phy_82577
)) {
3744 ew32(FCRTV_PCH
, hw
->fc
.refresh_time
);
3746 ret_val
= e1e_wphy(hw
, PHY_REG(BM_PORT_CTRL_PAGE
, 27),
3752 return e1000e_set_fc_watermarks(hw
);
3756 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3757 * @hw: pointer to the HW structure
3759 * Configures the kumeran interface to the PHY to wait the appropriate time
3760 * when polling the PHY, then call the generic setup_copper_link to finish
3761 * configuring the copper link.
3763 static s32
e1000_setup_copper_link_ich8lan(struct e1000_hw
*hw
)
3770 ctrl
|= E1000_CTRL_SLU
;
3771 ctrl
&= ~(E1000_CTRL_FRCSPD
| E1000_CTRL_FRCDPX
);
3775 * Set the mac to wait the maximum time between each iteration
3776 * and increase the max iterations when polling the phy;
3777 * this fixes erroneous timeouts at 10Mbps.
3779 ret_val
= e1000e_write_kmrn_reg(hw
, E1000_KMRNCTRLSTA_TIMEOUTS
, 0xFFFF);
3782 ret_val
= e1000e_read_kmrn_reg(hw
, E1000_KMRNCTRLSTA_INBAND_PARAM
,
3787 ret_val
= e1000e_write_kmrn_reg(hw
, E1000_KMRNCTRLSTA_INBAND_PARAM
,
3792 switch (hw
->phy
.type
) {
3793 case e1000_phy_igp_3
:
3794 ret_val
= e1000e_copper_link_setup_igp(hw
);
3799 case e1000_phy_82578
:
3800 ret_val
= e1000e_copper_link_setup_m88(hw
);
3804 case e1000_phy_82577
:
3805 case e1000_phy_82579
:
3806 case e1000_phy_i217
:
3807 ret_val
= e1000_copper_link_setup_82577(hw
);
3812 ret_val
= e1e_rphy(hw
, IFE_PHY_MDIX_CONTROL
, ®_data
);
3816 reg_data
&= ~IFE_PMC_AUTO_MDIX
;
3818 switch (hw
->phy
.mdix
) {
3820 reg_data
&= ~IFE_PMC_FORCE_MDIX
;
3823 reg_data
|= IFE_PMC_FORCE_MDIX
;
3827 reg_data
|= IFE_PMC_AUTO_MDIX
;
3830 ret_val
= e1e_wphy(hw
, IFE_PHY_MDIX_CONTROL
, reg_data
);
3838 return e1000e_setup_copper_link(hw
);
/**
 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to store current link speed
 * @duplex: pointer to store the current link duplex
 *
 * Calls the generic get_speed_and_duplex to retrieve the current link
 * information and then calls the Kumeran lock loss workaround for links at
 * gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}
3870 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3871 * @hw: pointer to the HW structure
3873 * Work-around for 82566 Kumeran PCS lock loss:
3874 * On link status change (i.e. PCI reset, speed change) and link is up and
3876 * 0) if workaround is optionally disabled do nothing
3877 * 1) wait 1ms for Kumeran link to come up
3878 * 2) check Kumeran Diagnostic register PCS lock loss bit
3879 * 3) if not set the link is locked (all is good), otherwise...
3881 * 5) repeat up to 10 times
3882 * Note: this is only called for IGP3 copper when speed is 1gb.
3884 static s32
e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw
*hw
)
3886 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
3892 if (!dev_spec
->kmrn_lock_loss_workaround_enabled
)
3896 * Make sure link is up before proceeding. If not just return.
3897 * Attempting this while link is negotiating fouled up link
3900 ret_val
= e1000e_phy_has_link_generic(hw
, 1, 0, &link
);
3904 for (i
= 0; i
< 10; i
++) {
3905 /* read once to clear */
3906 ret_val
= e1e_rphy(hw
, IGP3_KMRN_DIAG
, &data
);
3909 /* and again to get new status */
3910 ret_val
= e1e_rphy(hw
, IGP3_KMRN_DIAG
, &data
);
3914 /* check for PCS lock */
3915 if (!(data
& IGP3_KMRN_DIAG_PCS_LOCK_LOSS
))
3918 /* Issue PHY reset */
3919 e1000_phy_hw_reset(hw
);
3922 /* Disable GigE link negotiation */
3923 phy_ctrl
= er32(PHY_CTRL
);
3924 phy_ctrl
|= (E1000_PHY_CTRL_GBE_DISABLE
|
3925 E1000_PHY_CTRL_NOND0A_GBE_DISABLE
);
3926 ew32(PHY_CTRL
, phy_ctrl
);
3929 * Call gig speed drop workaround on Gig disable before accessing
3932 e1000e_gig_downshift_workaround_ich8lan(hw
);
3934 /* unable to acquire PCS lock */
3935 return -E1000_ERR_PHY
;
/**
 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 * @hw: pointer to the HW structure
 * @state: boolean value used to set the current Kumeran workaround state
 *
 * If ICH8, set the current Kumeran workaround state (enabled - true
 * /disabled - false).
 **/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						  bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	if (hw->mac.type != e1000_ich8lan) {
		e_dbg("Workaround applies to ICH8 only.\n");
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;
}
3960 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3961 * @hw: pointer to the HW structure
3963 * Workaround for 82566 power-down on D3 entry:
3964 * 1) disable gigabit link
3965 * 2) write VR power-down enable
3967 * Continue if successful, else issue LCD reset and repeat
3969 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw
*hw
)
3975 if (hw
->phy
.type
!= e1000_phy_igp_3
)
3978 /* Try the workaround twice (if needed) */
3981 reg
= er32(PHY_CTRL
);
3982 reg
|= (E1000_PHY_CTRL_GBE_DISABLE
|
3983 E1000_PHY_CTRL_NOND0A_GBE_DISABLE
);
3984 ew32(PHY_CTRL
, reg
);
3987 * Call gig speed drop workaround on Gig disable before
3988 * accessing any PHY registers
3990 if (hw
->mac
.type
== e1000_ich8lan
)
3991 e1000e_gig_downshift_workaround_ich8lan(hw
);
3993 /* Write VR power-down enable */
3994 e1e_rphy(hw
, IGP3_VR_CTRL
, &data
);
3995 data
&= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK
;
3996 e1e_wphy(hw
, IGP3_VR_CTRL
, data
| IGP3_VR_CTRL_MODE_SHUTDOWN
);
3998 /* Read it back and test */
3999 e1e_rphy(hw
, IGP3_VR_CTRL
, &data
);
4000 data
&= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK
;
4001 if ((data
== IGP3_VR_CTRL_MODE_SHUTDOWN
) || retry
)
4004 /* Issue PHY reset and repeat at most one more time */
4006 ew32(CTRL
, reg
| E1000_CTRL_PHY_RST
);
/**
 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 * @hw: pointer to the HW structure
 *
 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 * LPLU, Gig disable, MDIC PHY reset):
 *   1) Set Kumeran Near-end loopback
 *   2) Clear Kumeran Near-end loopback
 * Should only be called for ICH8[m] devices with any 1G Phy.
 **/
void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				       &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					reg_data);
}
4044 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4045 * @hw: pointer to the HW structure
4047 * During S0 to Sx transition, it is possible the link remains at gig
4048 * instead of negotiating to a lower speed. Before going to Sx, set
4049 * 'Gig Disable' to force link speed negotiation to a lower speed based on
4050 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
4051 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4052 * needs to be written.
4053 * Parts that support (and are linked to a partner which support) EEE in
4054 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4055 * than 10Mbps w/o EEE.
4057 void e1000_suspend_workarounds_ich8lan(struct e1000_hw
*hw
)
4059 struct e1000_dev_spec_ich8lan
*dev_spec
= &hw
->dev_spec
.ich8lan
;
4063 phy_ctrl
= er32(PHY_CTRL
);
4064 phy_ctrl
|= E1000_PHY_CTRL_GBE_DISABLE
;
4065 if (hw
->phy
.type
== e1000_phy_i217
) {
4068 ret_val
= hw
->phy
.ops
.acquire(hw
);
4072 if (!dev_spec
->eee_disable
) {
4075 ret_val
= e1e_wphy_locked(hw
, I82579_EMI_ADDR
,
4076 I217_EEE_ADVERTISEMENT
);
4079 e1e_rphy_locked(hw
, I82579_EMI_DATA
, &eee_advert
);
4082 * Disable LPLU if both link partners support 100BaseT
4083 * EEE and 100Full is advertised on both ends of the
4086 if ((eee_advert
& I217_EEE_100_SUPPORTED
) &&
4087 (dev_spec
->eee_lp_ability
&
4088 I217_EEE_100_SUPPORTED
) &&
4089 (hw
->phy
.autoneg_advertised
& ADVERTISE_100_FULL
))
4090 phy_ctrl
&= ~(E1000_PHY_CTRL_D0A_LPLU
|
4091 E1000_PHY_CTRL_NOND0A_LPLU
);
4095 * For i217 Intel Rapid Start Technology support,
4096 * when the system is going into Sx and no manageability engine
4097 * is present, the driver must configure proxy to reset only on
4098 * power good. LPI (Low Power Idle) state must also reset only
4099 * on power good, as well as the MTA (Multicast table array).
4100 * The SMBus release must also be disabled on LCD reset.
4102 if (!(er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
)) {
4104 /* Enable proxy to reset only on power good. */
4105 e1e_rphy_locked(hw
, I217_PROXY_CTRL
, &phy_reg
);
4106 phy_reg
|= I217_PROXY_CTRL_AUTO_DISABLE
;
4107 e1e_wphy_locked(hw
, I217_PROXY_CTRL
, phy_reg
);
4110 * Set bit enable LPI (EEE) to reset only on
4113 e1e_rphy_locked(hw
, I217_SxCTRL
, &phy_reg
);
4114 phy_reg
|= I217_SxCTRL_ENABLE_LPI_RESET
;
4115 e1e_wphy_locked(hw
, I217_SxCTRL
, phy_reg
);
4117 /* Disable the SMB release on LCD reset. */
4118 e1e_rphy_locked(hw
, I217_MEMPWR
, &phy_reg
);
4119 phy_reg
&= ~I217_MEMPWR_DISABLE_SMB_RELEASE
;
4120 e1e_wphy_locked(hw
, I217_MEMPWR
, phy_reg
);
4124 * Enable MTA to reset for Intel Rapid Start Technology
4127 e1e_rphy_locked(hw
, I217_CGFREG
, &phy_reg
);
4128 phy_reg
|= I217_CGFREG_ENABLE_MTA_RESET
;
4129 e1e_wphy_locked(hw
, I217_CGFREG
, phy_reg
);
4132 hw
->phy
.ops
.release(hw
);
4135 ew32(PHY_CTRL
, phy_ctrl
);
4137 if (hw
->mac
.type
== e1000_ich8lan
)
4138 e1000e_gig_downshift_workaround_ich8lan(hw
);
4140 if (hw
->mac
.type
>= e1000_pchlan
) {
4141 e1000_oem_bits_config_ich8lan(hw
, false);
4143 /* Reset PHY to activate OEM bits on 82577/8 */
4144 if (hw
->mac
.type
== e1000_pchlan
)
4145 e1000e_phy_hw_reset_generic(hw
);
4147 ret_val
= hw
->phy
.ops
.acquire(hw
);
4150 e1000_write_smbus_addr(hw
);
4151 hw
->phy
.ops
.release(hw
);
4156 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4157 * @hw: pointer to the HW structure
4159 * During Sx to S0 transitions on non-managed devices or managed devices
4160 * on which PHY resets are not blocked, if the PHY registers cannot be
4161 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
4163 * On i217, setup Intel Rapid Start Technology.
4165 void e1000_resume_workarounds_pchlan(struct e1000_hw
*hw
)
4169 if (hw
->mac
.type
< e1000_pch2lan
)
4172 ret_val
= e1000_init_phy_workarounds_pchlan(hw
);
4174 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val
);
4179 * For i217 Intel Rapid Start Technology support when the system
4180 * is transitioning from Sx and no manageability engine is present
4181 * configure SMBus to restore on reset, disable proxy, and enable
4182 * the reset on MTA (Multicast table array).
4184 if (hw
->phy
.type
== e1000_phy_i217
) {
4187 ret_val
= hw
->phy
.ops
.acquire(hw
);
4189 e_dbg("Failed to setup iRST\n");
4193 if (!(er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
)) {
4195 * Restore clear on SMB if no manageability engine
4198 ret_val
= e1e_rphy_locked(hw
, I217_MEMPWR
, &phy_reg
);
4201 phy_reg
|= I217_MEMPWR_DISABLE_SMB_RELEASE
;
4202 e1e_wphy_locked(hw
, I217_MEMPWR
, phy_reg
);
4205 e1e_wphy_locked(hw
, I217_PROXY_CTRL
, 0);
4207 /* Enable reset on MTA */
4208 ret_val
= e1e_rphy_locked(hw
, I217_CGFREG
, &phy_reg
);
4211 phy_reg
&= ~I217_CGFREG_ENABLE_MTA_RESET
;
4212 e1e_wphy_locked(hw
, I217_CGFREG
, phy_reg
);
4215 e_dbg("Error %d in resume workarounds\n", ret_val
);
4216 hw
->phy
.ops
.release(hw
);
/**
 * e1000_cleanup_led_ich8lan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);

	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 * e1000_led_on_ich8lan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	ew32(LEDCTL, hw->mac.ledctl_mode2);
	return 0;
}

/**
 * e1000_led_off_ich8lan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE |
				 IFE_PSCL_PROBE_LEDS_OFF));

	ew32(LEDCTL, hw->mac.ledctl_mode1);
	return 0;
}

/**
 * e1000_setup_led_pchlan - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}

/**
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}
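
/*
 * On PCH-based parts the LEDs are not driven from the MAC LEDCTL register at
 * all; the cached ledctl_mode1/ledctl_default values are instead written to
 * the PHY's HV_LED_CONFIG register, which is why the pchlan variants above
 * differ from their ich8lan counterparts.
 */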
4291 * e1000_led_on_pchlan - Turn LEDs on
4292 * @hw: pointer to the HW structure
4296 static s32
e1000_led_on_pchlan(struct e1000_hw
*hw
)
4298 u16 data
= (u16
)hw
->mac
.ledctl_mode2
;
4302 * If no link, then turn LED on by setting the invert bit
4303 * for each LED that's mode is "link_up" in ledctl_mode2.
4305 if (!(er32(STATUS
) & E1000_STATUS_LU
)) {
4306 for (i
= 0; i
< 3; i
++) {
4307 led
= (data
>> (i
* 5)) & E1000_PHY_LED0_MASK
;
4308 if ((led
& E1000_PHY_LED0_MODE_MASK
) !=
4309 E1000_LEDCTL_MODE_LINK_UP
)
4311 if (led
& E1000_PHY_LED0_IVRT
)
4312 data
&= ~(E1000_PHY_LED0_IVRT
<< (i
* 5));
4314 data
|= (E1000_PHY_LED0_IVRT
<< (i
* 5));
4318 return e1e_wphy(hw
, HV_LED_CONFIG
, data
);
4322 * e1000_led_off_pchlan - Turn LEDs off
4323 * @hw: pointer to the HW structure
4325 * Turn off the LEDs.
4327 static s32
e1000_led_off_pchlan(struct e1000_hw
*hw
)
4329 u16 data
= (u16
)hw
->mac
.ledctl_mode1
;
4333 * If no link, then turn LED off by clearing the invert bit
4334 * for each LED that's mode is "link_up" in ledctl_mode1.
4336 if (!(er32(STATUS
) & E1000_STATUS_LU
)) {
4337 for (i
= 0; i
< 3; i
++) {
4338 led
= (data
>> (i
* 5)) & E1000_PHY_LED0_MASK
;
4339 if ((led
& E1000_PHY_LED0_MODE_MASK
) !=
4340 E1000_LEDCTL_MODE_LINK_UP
)
4342 if (led
& E1000_PHY_LED0_IVRT
)
4343 data
&= ~(E1000_PHY_LED0_IVRT
<< (i
* 5));
4345 data
|= (E1000_PHY_LED0_IVRT
<< (i
* 5));
4349 return e1e_wphy(hw
, HV_LED_CONFIG
, data
);
4353 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4354 * @hw: pointer to the HW structure
4356 * Read appropriate register for the config done bit for completion status
4357 * and configure the PHY through s/w for EEPROM-less parts.
4359 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4360 * config done bit, so only an error is logged and continues. If we were
4361 * to return with error, EEPROM-less silicon would not be able to be reset
4364 static s32
e1000_get_cfg_done_ich8lan(struct e1000_hw
*hw
)
4370 e1000e_get_cfg_done(hw
);
4372 /* Wait for indication from h/w that it has completed basic config */
4373 if (hw
->mac
.type
>= e1000_ich10lan
) {
4374 e1000_lan_init_done_ich8lan(hw
);
4376 ret_val
= e1000e_get_auto_rd_done(hw
);
4379 * When auto config read does not complete, do not
4380 * return with an error. This can happen in situations
4381 * where there is no eeprom and prevents getting link.
4383 e_dbg("Auto Read Done did not complete\n");
4388 /* Clear PHY Reset Asserted bit */
4389 status
= er32(STATUS
);
4390 if (status
& E1000_STATUS_PHYRA
)
4391 ew32(STATUS
, status
& ~E1000_STATUS_PHYRA
);
4393 e_dbg("PHY Reset Asserted not set - needs delay\n");
4395 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4396 if (hw
->mac
.type
<= e1000_ich9lan
) {
4397 if (!(er32(EECD
) & E1000_EECD_PRES
) &&
4398 (hw
->phy
.type
== e1000_phy_igp_3
)) {
4399 e1000e_phy_init_script_igp3(hw
);
4402 if (e1000_valid_nvm_bank_detect_ich8lan(hw
, &bank
)) {
4403 /* Maybe we should do a basic PHY config */
4404 e_dbg("EEPROM not present\n");
4405 ret_val
= -E1000_ERR_CONFIG
;
/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}
/**
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	e1000e_clear_hw_cntrs_base(hw);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}
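/*
 * Illustrative sketch (not part of the upstream driver): the HV_*_UPPER and
 * HV_*_LOWER PHY statistics registers cleared above are clear-on-read.  When
 * a stats-update path wants to keep the counts instead of discarding them,
 * it would typically read the UPPER half to latch and clear the counter and
 * then accumulate the LOWER half into a software counter.  The helper name
 * and the u64 accumulator below are hypothetical and exist only to show the
 * access pattern; error handling is reduced to a single check.
 */
static void e1000_accumulate_phy_stat_sketch(struct e1000_hw *hw,
					     u32 upper_reg, u32 lower_reg,
					     u64 *accumulator)
{
	u16 phy_data;
	s32 ret_val;

	/* Reading the UPPER half latches the counter; value is discarded */
	hw->phy.ops.read_reg_page(hw, upper_reg, &phy_data);

	/* The LOWER half carries the count accumulated since the last read */
	ret_val = hw->phy.ops.read_reg_page(hw, lower_reg, &phy_data);
	if (!ret_val)
		*accumulator += phy_data;
}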
static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link = e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info = e1000_get_bus_info_ich8lan,
	.set_lan_id = e1000_set_lan_id_single_port,
	.get_link_up_info = e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
	.reset_hw = e1000_reset_hw_ich8lan,
	.init_hw = e1000_init_hw_ich8lan,
	.setup_link = e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist = e1000e_config_collision_dist_generic,
	.rar_set = e1000e_rar_set_generic,
};
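/*
 * Illustrative sketch (not part of the upstream driver): the entries marked
 * "dependent on mac type" in ich8_mac_ops above are not filled in statically;
 * they are assigned at runtime once the MAC type is known.  Conceptually the
 * MAC-parameter init path does something like the following.  The callback
 * names used here are assumptions made for illustration only and may not
 * match the real helpers in this family.
 */
static void e1000_bind_mac_type_ops_sketch(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	switch (mac->type) {
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		/* PCH parts get PCH-specific manageability/LED callbacks */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		/* ICH8/9/10 parts use the ICH8-family/generic callbacks */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		mac->ops.id_led_init = e1000e_id_led_init;
		mac->ops.led_on = e1000e_led_on_generic;
		mac->ops.led_off = e1000e_led_off_generic;
		break;
	}
}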
static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire = e1000_acquire_swflag_ich8lan,
	.check_reset_block = e1000_check_reset_block_ich8lan,
	.get_cfg_done = e1000_get_cfg_done_ich8lan,
	.get_cable_length = e1000e_get_cable_length_igp_2,
	.read_reg = e1000e_read_phy_reg_igp,
	.release = e1000_release_swflag_ich8lan,
	.reset = e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
	.write_reg = e1000e_write_phy_reg_igp,
};
static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire = e1000_acquire_nvm_ich8lan,
	.read = e1000_read_nvm_ich8lan,
	.release = e1000_release_nvm_ich8lan,
	.reload = e1000e_reload_nvm_generic,
	.update = e1000_update_nvm_checksum_ich8lan,
	.valid_led_default = e1000_valid_led_default_ich8lan,
	.validate = e1000_validate_nvm_checksum_ich8lan,
	.write = e1000_write_nvm_ich8lan,
};
const struct e1000_info e1000_ich8_info = {
	.mac = e1000_ich8lan,
	.flags = FLAG_HAS_WOL |
		 FLAG_HAS_CTRLEXT_ON_LOAD,
	.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
const struct e1000_info e1000_ich9_info = {
	.mac = e1000_ich9lan,
	.flags = FLAG_HAS_JUMBO_FRAMES |
		 FLAG_HAS_CTRLEXT_ON_LOAD,
	.max_hw_frame_size = DEFAULT_JUMBO,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
const struct e1000_info e1000_ich10_info = {
	.mac = e1000_ich10lan,
	.flags = FLAG_HAS_JUMBO_FRAMES |
		 FLAG_HAS_CTRLEXT_ON_LOAD,
	.max_hw_frame_size = DEFAULT_JUMBO,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
const struct e1000_info e1000_pch_info = {
	.mac = e1000_pchlan,
	.flags = FLAG_IS_ICH |
		 FLAG_HAS_CTRLEXT_ON_LOAD |
		 FLAG_HAS_JUMBO_FRAMES |
		 FLAG_DISABLE_FC_PAUSE_TIME, /* errata */
	.flags2 = FLAG2_HAS_PHY_STATS,
	.max_hw_frame_size = 4096,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
const struct e1000_info e1000_pch2_info = {
	.mac = e1000_pch2lan,
	.flags = FLAG_IS_ICH |
		 FLAG_HAS_CTRLEXT_ON_LOAD |
		 FLAG_HAS_JUMBO_FRAMES,
	.flags2 = FLAG2_HAS_PHY_STATS,
	.max_hw_frame_size = DEFAULT_JUMBO,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
const struct e1000_info e1000_pch_lpt_info = {
	.mac = e1000_pch_lpt,
	.flags = FLAG_IS_ICH |
		 FLAG_HAS_CTRLEXT_ON_LOAD |
		 FLAG_HAS_JUMBO_FRAMES,
	.flags2 = FLAG2_HAS_PHY_STATS,
	.max_hw_frame_size = DEFAULT_JUMBO,
	.get_variants = e1000_get_variants_ich8lan,
	.mac_ops = &ich8_mac_ops,
	.phy_ops = &ich8_phy_ops,
	.nvm_ops = &ich8_nvm_ops,
};
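/*
 * Illustrative sketch (not part of this file): each e1000_info table above is
 * selected by the PCI probe path based on the device's board type and is then
 * consumed roughly as below - the static per-family parameters seed the hw
 * struct, the shared op tables are copied in, and the get_variants hook is
 * called to patch in the mac-type-dependent pieces.  Any function or member
 * name here that does not appear elsewhere in this file is an assumption made
 * for the sake of the example.
 */
static int e1000_bind_board_info_sketch(struct e1000_adapter *adapter,
					const struct e1000_info *ei)
{
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* Seed the hw struct with the static per-family parameters */
	hw->mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;

	/* Install the shared function tables ... */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));

	/* ... then let the family hook fill in mac-type-dependent ops */
	err = ei->get_variants(adapter);
	if (err)
		return err;

	return 0;
}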