// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET           0x201

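/* Illustrative note (added for clarity, not part of the original source):
 * on Hyper-V the host exposes the VF's permanent MAC address in PCI config
 * space starting at this offset, one byte per address octet (0x201..0x206);
 * ixgbevf_hv_reset_hw_vf() below reads those six bytes into
 * hw->mac.perm_addr instead of using the PF mailbox.
 */
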
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
					     u32 *retmsg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (retval)
		return retval;

	return mbx->ops.read_posted(hw, retmsg, size);
}

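/* Usage sketch (illustration only, mirroring the callers below rather than
 * adding new behaviour): callers post a request and reuse the same buffer
 * for the PF's reply, e.g.
 *
 *	u32 msgbuf[2] = { IXGBE_VF_SET_LPE, max_size };
 *	s32 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 *					     ARRAY_SIZE(msgbuf));
 *
 * On return msgbuf[0] carries the original command OR'ed with
 * IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK (plus IXGBE_VT_MSGTYPE_CTS),
 * which is why callers mask off IXGBE_VT_MSGTYPE_CTS before comparing.
 */
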
/**
 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 *  ixgbevf_init_hw_vf - virtual function hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Initialize the hardware by resetting the hardware and then starting
 *  the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}

/**
 *  ixgbevf_reset_hw_vf - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}

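/* Illustrative layout of the VF_RESET exchange above (annotation added for
 * clarity, not part of the original source): the VF sends a single word,
 * msgbuf[0] = IXGBE_VF_RESET, and the PF replies with
 * IXGBE_VF_PERMADDR_MSG_LEN words, where words 1-2 hold the six bytes of the
 * permanent MAC address and word 3 (IXGBE_VF_MC_TYPE_WORD) carries the
 * multicast filter type used by ixgbevf_mta_vector() below.
 */
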
/**
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 * @hw: pointer to private hardware struct
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}

/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}

/**
 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 *  @hw: pointer to hardware structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits, from a multicast address, to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming Rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:      /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:      /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:      /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:      /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:     /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

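/* Worked example (added for illustration, not in the original source): for
 * the multicast address 01:00:5e:00:00:fb with mc_filter_type 0,
 * mc_addr[4] = 0x00 and mc_addr[5] = 0xfb, so
 *
 *	vector = (0x00 >> 4) | (0xfb << 4) = 0xfb0
 *
 * which already fits in 12 bits, i.e. entry 0xfb0 of the 4096-entry
 * multicast table is the one the PF is asked to set.
 */
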
/**
 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 *  @hw: pointer to the HW structure
 *  @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}

static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}

static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}

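/* Worked example of the unpacking loop above (illustration only, not from
 * the original source): with two Rx queues the mask is 0x1, and a reply
 * DWORD of hw_reta[0] == 0x44444444 decodes, two bits at a time from the
 * least significant end, into the fields 0, 1, 0, 1, ... so reta[0..15]
 * alternates between queue 0 and queue 1.
 */
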
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}

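/* Layout note for the exchange above (annotation added for clarity, not
 * part of the original source): the reply is 11 DWORDs, one status word
 * followed by the ten key registers, so msgbuf + 1 points at the
 * IXGBEVF_RSS_HASH_KEY_SIZE bytes of hash key copied out to the caller.
 */
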
/**
 *  ixgbevf_set_rar_vf - set device MAC address
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}

/**
 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 *
 *  We don't really allow setting the device MAC address. However,
 *  if the address being set is the permanent MAC address we will
 *  permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
				 u32 vmdq)
{
	if (ether_addr_equal(addr, hw->mac.perm_addr))
		return 0;

	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word. We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type). That's 30 hash values if we pack 'em right. If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}

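/* Message layout for IXGBE_VF_SET_MULTICAST above (annotation added for
 * illustration, not part of the original source): word 0 carries the command
 * with the entry count in the IXGBE_VT_MSGINFO field, and the remaining
 * words are treated as an array of 16-bit MTA vectors, two per DWORD, which
 * is where the 30-entry limit described in the comment comes from.
 */
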
/**
 * Hyper-V variant - just a stub.
 * @hw: unused
 * @netdev: unused
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}

/**
 * Hyper-V variant - just a stub.
 * @hw: unused
 * @xcast_mode: unused
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 *  @hw: pointer to the HW structure
 *  @vlan: 12 bit VLAN ID
 *  @vind: unused by VF drivers
 *  @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

	return err;
}

/**
 * Hyper-V variant - just a stub.
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}

/**
 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @speed: Unused in this implementation
 *  @autoneg: Unused in this implementation
 *  @autoneg_wait_to_complete: Unused in this implementation
 *
 *  Do nothing and return success. VF drivers are not allowed to change
 *  global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}

/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true if link is up, false otherwise
 *  @autoneg_wait_to_complete: unused
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/**
 * Hyper-V variant; there is no mailbox communication.
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}

/**
 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 *  @hw: pointer to the HW structure
 *  @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}

/**
 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 *  @hw: pointer to the HW structure
 *  @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}

/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 * Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}

int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (err)
		return err;

	msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such
	 */
	if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* record and validate values from message */
	hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
	if (hw->mac.max_tx_queues == 0 ||
	    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
		hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

	hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
	if (hw->mac.max_rx_queues == 0 ||
	    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
		hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

	*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
	/* in case of unknown state assume we cannot tag frames */
	if (*num_tcs > hw->mac.max_rx_queues)
		*num_tcs = 1;

	*default_tc = msg[IXGBE_VF_DEF_QUEUE];
	/* default to queue 0 on out-of-bounds queue number */
	if (*default_tc >= hw->mac.max_tx_queues)
		*default_tc = 0;

	return 0;
}

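/* Reply layout for IXGBE_VF_GET_QUEUE above (annotation added for
 * illustration, not part of the original source): the PF echoes the
 * five-word message, and the words indexed by IXGBE_VF_TX_QUEUES,
 * IXGBE_VF_RX_QUEUES, IXGBE_VF_TRANS_VLAN and IXGBE_VF_DEF_QUEUE carry the
 * Tx/Rx queue counts, the number of traffic classes and the default traffic
 * class; the checks above clamp anything out of range.
 */
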
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};

static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};