// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "vf.h"
#include "ixgbevf.h"
/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET	0x201
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
					     u32 *retmsg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (retval)
		return retval;

	return mbx->ops.read_posted(hw, retmsg, size);
}
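/* Minimal usage sketch (illustrative only, mirroring the callers below):
 * a request is built in a small u32 buffer, sent through
 * ixgbevf_write_msg_read_ack(), and the echoed first word is checked for
 * ACK/NACK after masking off the CTS bit, e.g.:
 *
 *	u32 msgbuf[2];
 *	s32 err;
 *
 *	msgbuf[0] = IXGBE_VF_SET_LPE;
 *	msgbuf[1] = max_size;
 *	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 *					 ARRAY_SIZE(msgbuf));
 *	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 *	if (!err && (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
 *		... the PF refused the request ...
 */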
/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}
/**
 * ixgbevf_init_hw_vf - virtual function hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware and then starting
 * the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggybacked
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
/**
 * ixgbevf_hv_reset_hw_vf - Performs hardware reset, Hyper-V variant
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 **/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
/**
 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming Rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:	/* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:	/* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:	/* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:	/* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:	/* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
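/* Worked example (illustrative only): with the default mc_filter_type of 0,
 * the multicast address 01:00:5e:00:00:fb has mc_addr[4] = 0x00 and
 * mc_addr[5] = 0xfb, so
 *
 *	vector = ((0x00 >> 4) | (0xfb << 4)) & 0xFFF = 0xfb0
 *
 * The PF side then typically uses the upper bits of the vector to pick one
 * of the MTA registers and the low 5 bits to pick the bit within that
 * register (that register/bit split lives in the PF driver, not here).
 */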
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
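/* Decoding example (illustrative only): each hw_reta[] dword packs 16 RETA
 * entries at 2 bits each, least-significant entry first. If the PF returned
 * hw_reta[0] = 0x000000e4 (binary ...11 10 01 00), the loop above yields
 * entries 0..3 = 0, 1, 2, 3 before masking; with num_rx_queues > 1 the 0x1
 * mask then folds them onto the two queues this VF actually supports,
 * giving 0, 1, 0, 1.
 */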
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
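/* Message layout note (derived from the code above): msgbuf[0] carries the
 * IXGBE_VF_GET_RSS_KEY command word and the PF's ACK/NACK on return; the
 * hash key itself comes back in msgbuf[1..10], which is why read_posted()
 * asks for 11 dwords and the memcpy() starts at msgbuf + 1.
 */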
/**
 * ixgbevf_set_rar_vf - set device MAC address
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}
/**
 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 *
 * We don't really allow setting the device MAC address. However,
 * if the address being set is the permanent MAC address we will
 * permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
				 u32 vmdq)
{
	if (ether_addr_equal(addr, hw->mac.perm_addr))
		return 0;

	return -EOPNOTSUPP;
}
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word. We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type). That's 30 hash values if we pack 'em right. If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					  IXGBE_VFMAILBOX_SIZE);
}
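/* Wire format sketch (derived from the code above): msgbuf[0] holds the
 * IXGBE_VF_SET_MULTICAST command in its low bits and the entry count above
 * IXGBE_VT_MSGINFO_SHIFT; vector_list[] then packs up to 30 16-bit hash
 * vectors, two per dword, starting at msgbuf[1].
 */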
/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub for Hyper-V
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - just a stub.
 **/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}
/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
/**
 * ixgbevf_hv_update_xcast_mode - stub for Hyper-V
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - just a stub.
 **/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}
/**
 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 * @hw: pointer to the HW structure
 * @vlan: 12 bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

	return err;
}
/**
 * ixgbevf_hv_set_vfta_vf - stub for Hyper-V
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - just a stub.
 **/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success. VF drivers are not allowed to change
 * global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK, we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;

	return ret_val;
}
/**
 * ixgbevf_hv_check_mac_link_vf - Get link/speed status, Hyper-V variant
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 **/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;

	return ret_val;
}
/**
 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return 0;
}
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 *
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* add 4 bytes to account for the CRC */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
/**
 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}
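/* A caller would typically walk the mailbox API versions from newest to
 * oldest until the PF accepts one. A minimal sketch (not the driver's
 * actual probe code) might look like:
 *
 *	static const int api[] = { ixgbe_mbox_api_14, ixgbe_mbox_api_13,
 *				   ixgbe_mbox_api_12, ixgbe_mbox_api_11,
 *				   ixgbe_mbox_api_10 };
 *	int i = 0;
 *
 *	while (i < ARRAY_SIZE(api) &&
 *	       hw->mac.ops.negotiate_api_version(hw, api[i]))
 *		i++;
 */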
/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 *
 * Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (err)
		return err;

	msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such
	 */
	if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* record and validate values from message */
	hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
	if (hw->mac.max_tx_queues == 0 ||
	    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
		hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

	hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
	if (hw->mac.max_rx_queues == 0 ||
	    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
		hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

	*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
	/* in case of unknown state assume we cannot tag frames */
	if (*num_tcs > hw->mac.max_rx_queues)
		*num_tcs = 1;

	*default_tc = msg[IXGBE_VF_DEF_QUEUE];
	/* default to queue 0 on out-of-bounds queue number */
	if (*default_tc >= hw->mac.max_tx_queues)
		*default_tc = 0;

	return 0;
}
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};