// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);

/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and stored:
 *  bus speed, bus width, type (PCIe), and PCIe function.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
/**
 *  igb_clear_vfta - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
		hw->mac.ops.write_vfta(hw, offset, 0);
}
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);

	adapter->shadow_vfta[offset] = value;
}
/**
 *  igb_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers
 *
 *  Sets up the receive address registers by setting the base receive address
 *  register to the device's MAC address and clearing all the other receive
 *  address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  return the VLVF index where this VLAN id should be placed
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter.  A worked example of the
 *  VFTA indexing follows the function.
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta using an XOR.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the VLAN is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is saved in the hw struct and
 *  programmed into RAR0 and the function returns success, otherwise the
 *  function returns an error.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;

	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= BIT(hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
}
/**
 *  igb_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.  See
 *  igb_mta_set()
 **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits.  Thus 8 - bit_shift.  The rest of the
	 * cases are a variation of this algorithm...essentially raising the
	 * number of bits to shift mc_addr[5] left, while still keeping the
	 * 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * we can see that the bit_shift for case 0 is 4.  These are the hash
	 * values resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB                 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
}
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	/* The base statistics registers (CRC errors, missed packets, octet
	 * and packet counts, and so on) are clear-on-read; each one is read
	 * once here so that it restarts from zero.
	 */
}
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:
	return ret_val;
}
/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.  Currently no func pointer exists and all
 *  implementations are handled in the generic version of this function.
 **/
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
}
/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return 0;
}
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350)
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
	else
		lan_offset = 0;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
				   1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 *  device control register to reflect the adapter settings.  TFCE and RFCE
 *  need to be explicitly set by software when a copper PHY is used because
 *  autonegotiation is managed by the PHY rather than the MAC.  Software must
 *  also configure these bits when link is forced on a fiber connection.
 **/
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}
/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the current
 *  speed and duplex for copper connections.
 **/
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}
/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}
/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}

out:
	return ret_val;
}
/**
 *  igb_id_led_init - Initialize LED identification settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the default LED configuration and derives the LEDCTL values used
 *  to drive the LEDs for identification (ledctl_mode1/ledctl_mode2).
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
/**
 *  igb_led_off - Turn LED off
 *  @hw: pointer to the HW structure
 **/
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 if successful, else returns -10
 *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
 *  caused the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  Verify that when not using auto-negotiation that MDI/MDIx is correctly
 *  set, which is forced to MDI mode only.
 **/
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}
/**
 *  igb_write_8bit_ctrl_reg - Write an 8bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  There are several of these
 *  and they all have the format address << 8 | data and bit 31 is polled for
 *  completion.
 **/
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
/**
 *  igb_enable_mng_pass_thru - Enable processing of ARPs
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave interface enabled so that frames can
 *  be directed to and from the management interface.
 **/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}