// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
{
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker then just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	struct device *dev;
	struct ice_hw *hw;
	int vf_abs_id, i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

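/* Illustrative example (hypothetical values, not part of the driver): the
 * GLGEN_VFLRSTAT math above spreads the absolute VF ID across 32-bit
 * registers. With vf_base_id = 64 and vf_id = 5, vf_abs_id is 69, so
 * reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5; writing BIT(5) to
 * GLGEN_VFLRSTAT(2) acknowledges this VF's VFLR.
 */
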
/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true for enable PVID false for disable
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
			 status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

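/* Illustrative note (hedged assumption): pvid_info is expected to pack the
 * VLAN ID and QoS using the standard 802.1Q TCI layout, with the priority
 * in bits 15:13. For example, VLAN ID 100 (0x064) with priority 5 would be
 * passed as (5 << 13) | 100 = 0xA064.
 */
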
/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

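/* Illustrative example (hypothetical values): with sriov_base_vector = 960
 * and num_msix_per_vf = 17, VF 2's first PF-space vector index would be
 * 960 + 2 * 17 = 994, so GLINT_DYN_CTL(994) belongs to that VF's OICR
 * vector.
 */
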
/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	struct device *dev;
	int status = 0;

	dev = ice_pf_to_dev(pf);
	/* first vector index is the VFs OICR index */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* Check if port VLAN exist before, and restore it accordingly */
	if (vf->port_vlan_info) {
		ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
			dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
				 vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
	} else {
		/* set VLAN 0 filter by default when no port VLAN is
		 * enabled. If a port VLAN is enabled we don't want
		 * untagged broadcast/multicast traffic seen on the VF
		 * interface.
		 */
		if (ice_vsi_add_vlan(vsi, 0))
			dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
				 vf->vf_id);
	}

	eth_broadcast_addr(broadcast);

	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto ice_alloc_vsi_res_exit;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_add_mac_to_list(vsi, &tmp_add_list,
					     vf->dflt_lan_addr.addr);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		dev_err(dev, "could not add mac filters error %d\n", status);

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
ice_alloc_vsi_res_exit:
	ice_free_fltr_list(dev, &tmp_add_list);
	return status;
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* Update number of VF queues, in case VF had requested for queue
	 * changes
	 */
	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	tx_rx_queue_left += pf->num_qps_per_vf;
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_mappings
 * @vf: pointer to the VF structure
 *
 * Enable VF vectors and queues allocation by writing the details into
 * respective registers.
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	int abs_vf_id, abs_first, abs_last;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;
	u32 reg;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	first = vf->first_vector_idx;
	last = (first + pf->num_msix_per_vf) - 1;
	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
	abs_last = (abs_first + pf->num_msix_per_vf) - 1;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* VF Vector allocation */
	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	       VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	       VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
	/* map the interrupts to its functions */
	for (v = first; v <= last; v++) {
		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
	 * VF admin queue interrupts will go to VF MSI-X vector 0.
	 */
	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate for all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

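/* Illustrative walk-through of the halving loop (hypothetical values):
 * with num_alloc_vfs = 8, avail_res = 48, max_res = 16, min_res = 1:
 * 16 * 8 = 128 > 48, so try DIV_ROUND_UP(16, 2) = 8; 8 * 8 = 64 > 48,
 * so try 4; 4 * 8 = 32 <= 48, so each VF gets 4 resources.
 */
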
/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
		q_vector->v_idx + 1;
}

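/* Illustrative example (hypothetical values): with sriov_base_vector = 960,
 * num_msix_per_vf = 17, vf_id = 1 and q_vector->v_idx = 0, the register
 * index is 960 + 17 * 1 + 0 + 1 = 978; the "+ 1" skips past the OICR,
 * which always occupies the VF's first vector.
 */
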
/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

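/* Illustrative example (hypothetical values): with 2048 function-level
 * MSI-X vectors, an irq_tracker using 220 entries, and 16 VFs needing
 * 17 vectors each (272 total), sriov_base_vector = 2048 - 272 = 1776,
 * which is >= 220, so the SR-IOV block fits at the end without touching
 * irq_tracker entries.
 */
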
/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}

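/* Illustrative example of the tiering above (hypothetical values): with
 * 128 MSI-X vectors left for SR-IOV and 16 VFs, msix_avail_per_vf = 8,
 * which falls between ICE_NUM_VF_MSIX_SMALL (5) and ICE_NUM_VF_MSIX_MED
 * (17), so each VF is given the small allotment of 5 vectors (4 for
 * queues plus 1 for the OICR).
 */
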
/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* PF software completes the flow by notifying VF that reset flow is
	 * completed. This is done by enabling hardware by clearing the reset
	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
	 * register to VFR completed (done at the end of this function)
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in ice_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

/**
 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
 * @pf: pointer to the PF structure
 *
 * This function is being called as last part of resetting all VFs, or when
 * configuring VFs for the first time, where there is no resource to be freed
 * Returns true if resources were properly allocated for all VFs, and false
 * otherwise.
 */
static bool ice_config_res_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int v;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	/* Finish resetting each VF and allocate resources */
	ice_for_each_vf(pf, v) {
		struct ice_vf *vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_qps_per_vf;
		dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
			vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
			ice_dis_vf_qs(vf);
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	if (!ice_config_res_vfs(pf))
		return false;

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need resetting VF until
	 * PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return true;
	}

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_alloc_vfs - Allocate and set up VFs resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}
	/* allocate memory */
	vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_pci_disable_sriov;
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	/* apply default profile */
	ice_for_each_vf(pf, i) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
	}

	/* VF resources get allocated with initialization */
	if (!ice_config_res_vfs(pf)) {
		ret = -EIO;
		goto err_unroll_sriov;
	}

	return ret;

err_unroll_sriov:
	pf->vf = NULL;
	devm_kfree(dev, vfs);
	vfs = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return num_vfs;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * This function is called when the user updates the number of VFs in sysfs.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs)
		return ice_pci_sriov_ena(pf, num_vfs);

	if (!pci_vfs_assigned(pdev)) {
		ice_free_vfs(pf);
	} else {
		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int vf_id;
	u32 reg;

	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !pf->num_alloc_vfs)
		return;

	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
	}
}

/**
 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
 * @vf: pointer to the VF info
 */
static void ice_vc_reset_vf(struct ice_vf *vf)
{
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	int vf_id;

	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = pf->vsi[vf->lan_vsi_idx];

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq)
				return vf;
	}

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

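/* Illustrative example (hypothetical values): if the function's Rx queue
 * space starts at rxq_first_id = 128 and the device reports global queue
 * 140, the PF-space queue index is 140 - 128 = 12.
 */
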
/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_vc_reset_vf(vf);
}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
static int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	enum ice_status aq_ret;
	struct device *dev;
	struct ice_pf *pf;

	if (!vf)
		return -EINVAL;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_inval_msgs++;
		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
			 v_opcode, v_retval);
		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(dev, "Use PF Control I/F to enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			return -EIO;
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_inval_msgs = 0;
	}

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
			 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_msix_per_vf;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->dflt_lan_addr.addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		ice_reset_vf(vf, false);
}

/**
 * ice_find_vsi_from_id
 * @pf: the PF structure to search for the VSI
 * @id: ID of the VSI it is searching for
 *
 * searches for the VSI with the given ID
 */
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
			return pf->vsi[i];

	return NULL;
}

/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * ice_vc_isvalid_q_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI ID
 * @qid: VSI relative queue ID
 *
 * check for the valid queue ID
 */
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should be always equal for VF VSI */
	return (vsi && (qid < vsi->alloc_txq));
}

/**
 * ice_vc_isvalid_ring_len
 * @ring_len: length of ring
 *
 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
 * or zero
 */
static bool ice_vc_isvalid_ring_len(u16 ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}

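/* Illustrative example (hedged, assuming ICE_MIN_NUM_DESC of 64,
 * ICE_MAX_NUM_DESC of 8160 and ICE_REQ_DESC_MULTIPLE of 32): a request of
 * 512 descriptors is valid (64 <= 512 <= 8160 and 512 % 32 == 0) while
 * 100 is rejected because 100 % 32 != 0. Zero is allowed as "no change".
 */
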
/**
 * ice_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 */
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss(vsi, vrk->key, NULL, 0))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
				     NULL, 0);
}

/**
 * ice_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 */
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
				     NULL, 0);
}

/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: The VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
	int i;

	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
			break;
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
	}
}

/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
	struct ice_pf *pf;

	ice_wait_on_vf_reset(vf);

	if (ice_is_vf_disabled(vf))
		return -EINVAL;

	pf = vf->pf;
	if (ice_check_vf_init(pf, vf))
		return -EBUSY;

	return 0;
}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi_ctx *ctx;
	struct ice_vsi *vf_vsi;
	enum ice_status status;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		return -EINVAL;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		return -ENODEV;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		return 0;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->info.sec_flags = vf_vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	if (ena) {
		ctx->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
	} else {
		ctx->info.sec_flags &=
			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
	}

	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
	if (status) {
		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
		ret = -EIO;
		goto out;
	}

	/* only update spoofchk state and VSI context on success */
	vf_vsi->info.sec_flags = ctx->info.sec_flags;
	vf->spoofchk = ena;

out:
	kfree(ctx);
	return ret;
}

/**
 * ice_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get VSI stats
 */
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
}

/**
 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Return true on successful validation, else false
 */
static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
		return false;

	return true;
}
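/* Illustrative values, assuming ICE_MAX_RSS_QS_PER_VF is 16 as in this
 * driver: a queue_select with rx_queues = 0x0003 (queues 0 and 1) and
 * tx_queues = 0 passes validation, while rx_queues = tx_queues = 0, or
 * rx_queues = 0x10000 (bit 16 set), is rejected.
 */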
/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
			continue;

		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		set_bit(vf_q_id, vf->rxq_ena);
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
			continue;

		set_bit(vf_q_id, vf->txq_ena);
	}

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}
/**
 * ice_vc_dis_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;
	u16 vf_q_id;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
			struct ice_txq_meta txq_meta = { 0 };

			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->txq_ena))
				continue;

			ice_fill_txq_meta(vsi, ring, &txq_meta);

			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
						 ring, &txq_meta)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->txq_ena);
		}
	}

	q_map = vqs->rx_queues;
	/* speed up Rx queue disable by batching them if possible */
	if (q_map &&
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
		if (ice_vsi_stop_all_rx_rings(vsi)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
				vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	} else if (q_map) {
		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))
				continue;

			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
						     true)) {
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);
		}
	}

	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}
/**
 * ice_cfg_interrupt
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID
 * @map: vector map for mapping vectors to queues
 * @q_vector: structure for interrupt vector
 * configure the IRQ to queue map
 */
static int
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
		  struct virtchnl_vector_map *map,
		  struct ice_q_vector *q_vector)
{
	u16 vsi_q_id, vsi_q_id_idx;
	unsigned long qmap;

	q_vector->num_ring_rx = 0;
	q_vector->num_ring_tx = 0;

	qmap = map->rxq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_rx++;
		q_vector->rx.itr_idx = map->rxitr_idx;
		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->rx.itr_idx);
	}

	qmap = map->txq_map;
	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
		vsi_q_id = vsi_q_id_idx;

		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
			return VIRTCHNL_STATUS_ERR_PARAM;

		q_vector->num_ring_tx++;
		q_vector->tx.itr_idx = map->txitr_idx;
		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
				      q_vector->tx.itr_idx);
	}

	return VIRTCHNL_STATUS_SUCCESS;
}
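/* Illustrative mapping, assuming the field layout of struct
 * virtchnl_vector_map: a VF asking for queue pair 0 on vector 1, with ITR
 * index 0 in both directions, would pass something like
 *
 *	struct virtchnl_vector_map map = {
 *		.vsi_id = vf_vsi_id,	// hypothetical VSI ID variable
 *		.vector_id = 1,		// vector 0 is the non-queue vector
 *		.rxq_map = BIT(0),
 *		.txq_map = BIT(0),
 *		.rxitr_idx = 0,
 *		.txitr_idx = 0,
 *	};
 */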
/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->num_msix_per_vf < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		if (!(vector_id < pf->num_msix_per_vf) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		v_ret = (enum virtchnl_status_code)
			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
/**
 * ice_vc_cfg_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the Rx/Tx queues
 */
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vsi_queue_config_info *qci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	u16 num_rxq = 0, num_txq = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		if (qpi->txq.vsi_id != qci->vsi_id ||
		    qpi->rxq.vsi_id != qci->vsi_id ||
		    qpi->rxq.queue_id != qpi->txq.queue_id ||
		    qpi->txq.headwb_enabled ||
		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}
		/* copy Tx queue info from VF into VSI */
		if (qpi->txq.ring_len > 0) {
			num_txq++;
			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
			vsi->tx_rings[i]->count = qpi->txq.ring_len;
		}

		/* copy Rx queue info from VF into VSI */
		if (qpi->rxq.ring_len > 0) {
			num_rxq++;
			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
			vsi->rx_rings[i]->count = qpi->rxq.ring_len;

			if (qpi->rxq.databuffer_size != 0 &&
			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
			     qpi->rxq.databuffer_size < 1024)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
			vsi->rx_buf_len = qpi->rxq.databuffer_size;
			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
			    qpi->rxq.max_pkt_size < 64) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
		}

		vsi->max_frame = qpi->rxq.max_pkt_size;
	}

	/* VF can request to configure less than allocated queues
	 * or default allocated queues. So update the VSI with new number
	 */
	vsi->num_txq = num_txq;
	vsi->num_rxq = num_rxq;
	/* All queues of VF VSI are in TC 0 */
	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;

	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
				     NULL, 0);
}
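/* Worked example of the Rx sanity limits above (values illustrative):
 * a databuffer_size of 2048 is accepted (1024 <= 2048 <= 16 * 1024 - 128),
 * while 512 is rejected; a max_pkt_size of 1518 is accepted
 * (64 <= 1518 < 16 * 1024), while 16384 is rejected.
 */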
/**
 * ice_is_vf_trusted
 * @vf: pointer to the VF info
 */
static bool ice_is_vf_trusted(struct ice_vf *vf)
{
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
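/* Trust is granted from the host, typically via iproute2 (interface and
 * VF number illustrative):
 *
 *	ip link set eth0 vf 0 trust on
 *
 * which lands in ice_set_vf_trust() further below; that sets vf->trusted
 * and resets the VF, and the privilege capability checked here is derived
 * from it when the VF is rebuilt.
 */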
/**
 * ice_can_vf_change_mac
 * @vf: pointer to the VF info
 *
 * Return true if the VF is allowed to change its MAC filters, false otherwise
 */
static bool ice_can_vf_change_mac(struct ice_vf *vf)
{
	/* If the VF MAC address has been set administratively (via the
	 * ndo_set_vf_mac command), then deny permission to the VF to
	 * add/delete unicast MAC addresses, unless the VF is trusted
	 */
	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
		return false;

	return true;
}
/**
 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to add
 */
static int
ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;

	/* default unicast MAC already added */
	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
		return 0;

	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
		return -EPERM;
	}

	status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
			vf->vf_id);
		return -EEXIST;
	} else if (status) {
		dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, status);
		return -EIO;
	}

	/* only set dflt_lan_addr once */
	if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
	    is_unicast_ether_addr(mac_addr))
		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);

	vf->num_mac++;

	return 0;
}
/**
 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to delete
 */
static int
ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;

	if (!ice_can_vf_change_mac(vf) &&
	    ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
		return 0;

	status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
			vf->vf_id);
		return -ENOENT;
	} else if (status) {
		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, status);
		return -EIO;
	}

	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
		eth_zero_addr(vf->dflt_lan_addr.addr);

	vf->num_mac--;

	return 0;
}
/**
 * ice_vc_handle_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being set, false otherwise
 *
 * add guest MAC address filter
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
		(struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;
	int i;

	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;
		int result;

		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))
			continue;

		result = ice_vc_cfg_mac(vf, vsi, mac_addr);
		if (result == -EEXIST || result == -ENOENT) {
			continue;
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;
		}
	}

handle_mac_exit:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
/**
 * ice_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}
/**
 * ice_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}
/**
 * ice_vc_request_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queue pairs via virtchnl message response to VF.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	u16 max_allowed_vf_queues;
	u16 tx_rx_queue_left;
	struct device *dev;
	u16 cur_queues;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	cur_queues = vf->num_vf_qs;
	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (!req_queues) {
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
			vf->vf_id);
	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
		dev_err(dev, "VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
	} else if (req_queues > cur_queues &&
		   req_queues - cur_queues > tx_rx_queue_left) {
		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
					       ICE_MAX_RSS_QS_PER_VF);
	} else {
		/* request is successful, then reset VF */
		vf->num_req_qs = req_queues;
		ice_vc_reset_vf(vf);
		dev_info(dev, "VF %d granted request of %u queues.\n",
			 vf->vf_id, req_queues);
		return 0;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}
/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	u16 vlanprio;
	int ret;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(dev, "VF VLAN protocol is not supported\n");
		return -EPROTONOSUPPORT;
	}

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);

	if (vf->port_vlan_info == vlanprio) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
		return 0;
	}

	if (vlan_id || qos) {
		/* remove VLAN 0 filter set by default when transitioning from
		 * no port VLAN to a port VLAN. No change to old port VLAN on
		 * failure.
		 */
		ret = ice_vsi_kill_vlan(vsi, 0);
		if (ret)
			return ret;
		ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
		if (ret)
			return ret;
	} else {
		/* add VLAN 0 filter back when transitioning from port VLAN to
		 * no port VLAN. No change to old port VLAN on failure.
		 */
		ret = ice_vsi_add_vlan(vsi, 0);
		if (ret)
			return ret;
		ret = ice_vsi_manage_pvid(vsi, 0, false);
		if (ret)
			return ret;
	}

	if (vlan_id) {
		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add VLAN filter for the port VLAN */
		ret = ice_vsi_add_vlan(vsi, vlan_id);
		if (ret)
			return ret;
	}

	/* remove old port VLAN filter with valid VLAN ID or QoS fields */
	if (vf->port_vlan_info)
		ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);

	/* keep port VLAN information persistent on resets */
	vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);

	return 0;
}
/**
 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
 */
static bool ice_vf_vlan_offload_ena(u32 caps)
{
	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
}
/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	int status = 0;
	u8 promisc_m;
	int i;

	dev = ice_pf_to_dev(pf);
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] >= VLAN_N_VID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(dev, "invalid VF VLAN id %d\n",
				vfl->vlan_id[i]);
			goto error_param;
		}
	}

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && !ice_is_vf_trusted(vf) &&
	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			 vf->vf_id);
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */
		goto error_param;
	}

	if (vsi->info.pvid) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
		vlan_promisc = true;

	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];

			if (!ice_is_vf_trusted(vf) &&
			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
					 vf->vf_id);
				/* There is no need to let VF know about being
				 * not trusted, so we can just return success
				 * message here as well.
				 */
				goto error_param;
			}

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't need to add it again here
			 */
			if (!vid)
				continue;

			status = ice_vsi_add_vlan(vsi, vid);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Enable VLAN pruning when non-zero VLAN is added */
			if (!vlan_promisc && vid &&
			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
				status = ice_cfg_vlan_pruning(vsi, true, false);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else if (vlan_promisc) {
				/* Enable Ucast/Mcast VLAN promiscuous mode */
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				status = ice_set_vsi_promisc(hw, vsi->idx,
							     promisc_m, vid);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* In case of non_trusted VF, number of VLAN elements passed
		 * to PF for removal might be greater than number of VLANs
		 * filter programmed for that VF - So, use actual number of
		 * VLANS added earlier with add VLAN opcode. In order to avoid
		 * removing VLAN that doesn't exist, which result to sending
		 * erroneous failed message back to the VF
		 */
		int num_vf_vlan;

		num_vf_vlan = vsi->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];

			/* we add VLAN 0 by default for each VF so we can enable
			 * Tx VLAN anti-spoof without triggering MDD events so
			 * we don't want a VIRTCHNL request to remove it
			 */
			if (!vid)
				continue;

			/* Make sure ice_vsi_kill_vlan is successful before
			 * updating VLAN information
			 */
			status = ice_vsi_kill_vlan(vsi, vid);
			if (status) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			/* Disable VLAN pruning when only VLAN 0 is left */
			if (vsi->num_vlan == 1 &&
			    ice_vsi_is_vlan_pruning_ena(vsi))
				ice_cfg_vlan_pruning(vsi, false, false);

			/* Disable Unicast/Multicast VLAN promiscuous mode */
			if (vlan_promisc) {
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				ice_clear_vsi_promisc(hw, vsi->idx,
						      promisc_m, vid);
			}
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}
/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}
/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest VLAN ID
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}
/**
 * ice_vc_ena_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Enable VLAN header stripping for a given VF
 */
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (ice_vsi_manage_vlan_stripping(vsi, true))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}
/**
 * ice_vc_dis_vlan_stripping
 * @vf: pointer to the VF info
 *
 * Disable VLAN header stripping for a given VF
 */
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, false))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				     v_ret, NULL, 0);
}
/**
 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
 * @vf: VF to enable/disable VLAN stripping for on initialization
 *
 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
 * the flag is cleared then we want to disable stripping. For example, the flag
 * will be cleared when port VLANs are configured by the administrator before
 * passing the VF to the guest or if the AVF driver doesn't support VLAN
 * offloads.
 */
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	if (!vsi)
		return -EINVAL;

	/* don't modify stripping if port VLAN is configured */
	if (vsi->info.pvid)
		return 0;

	if (ice_vf_vlan_offload_ena(vf->driver_caps))
		return ice_vsi_manage_vlan_stripping(vsi, true);
	else
		return ice_vsi_manage_vlan_stripping(vsi, false);
}
/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * called from the common asq/arq handler to
 * process request from VF
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id)) {
		err = -EINVAL;
		goto error_handler;
	}

	vf = &pf->vf[vf_id];

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

error_handler:
	if (err) {
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		return;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ice_vc_get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ice_vc_get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ice_vc_reset_vf_msg(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ice_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ice_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ice_vc_cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ice_vc_ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ice_vc_dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ice_vc_request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ice_vc_cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ice_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ice_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ice_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ice_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ice_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ice_vc_ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ice_vc_dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}
}
/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];

	if (ice_check_vf_init(pf, vf))
		return -EBUSY;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;

	return 0;
}
/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
	 * flow will use the updated dflt_lan_addr and add a MAC filter
	 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
	 * set the MAC address for this VF.
	 */
	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
		    vf_id, mac);

	ice_vc_reset_vf(vf);
	return 0;
}
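/* Administratively setting the MAC, e.g. (interface, VF number, and
 * address illustrative):
 *
 *	ip link set eth0 vf 0 mac 02:00:00:11:22:33
 *
 * After the triggered reset, the VF sees the new address and, because
 * pf_set_mac is now true, an untrusted VF can no longer override it.
 */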
/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* Check if already trusted */
	if (trusted == vf->trusted)
		return 0;

	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	return 0;
}
/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		return -EINVAL;
	}

	ice_vc_notify_vf_link_state(vf);

	return 0;
}
/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
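/* These counters back the per-VF statistics exposed through the
 * .ndo_get_vf_stats callback; recent iproute2 versions can display them
 * alongside each vf line with something like (interface illustrative):
 *
 *	ip -s link show dev eth0
 */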
/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int i;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->last_printed_mdd_jiffies = jiffies;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;

			dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
				 vf->mdd_rx_events.count, hw->pf_id, i,
				 vf->dflt_lan_addr.addr,
				 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
					  ? "on" : "off");
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, i,
				 vf->dflt_lan_addr.addr);
		}
	}
}