1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
5 #include "i40e_lan_hmc.h"
6 #include "i40e_virtchnl_pf.h"
8 /*********************notification routines***********************/
11 * i40e_vc_vf_broadcast
12 * @pf: pointer to the PF structure
13 * @v_opcode: operation code
14 * @v_retval: return value
15 * @msg: pointer to the msg buffer
18 * send a message to all VFs on a given PF
20 static void i40e_vc_vf_broadcast(struct i40e_pf
*pf
,
21 enum virtchnl_ops v_opcode
,
22 int v_retval
, u8
*msg
,
25 struct i40e_hw
*hw
= &pf
->hw
;
26 struct i40e_vf
*vf
= pf
->vf
;
29 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++, vf
++) {
30 int abs_vf_id
= vf
->vf_id
+ (int)hw
->func_caps
.vf_base_id
;
31 /* Not all vfs are enabled so skip the ones that are not */
32 if (!test_bit(I40E_VF_STATE_INIT
, &vf
->vf_states
) &&
33 !test_bit(I40E_VF_STATE_ACTIVE
, &vf
->vf_states
))
36 /* Ignore return value on purpose - a given VF may fail, but
37 * we need to keep going and send to all of them
39 i40e_aq_send_msg_to_vf(hw
, abs_vf_id
, v_opcode
, v_retval
,
45 * i40e_vc_link_speed2mbps
46 * converts i40e_aq_link_speed to integer value of Mbps
47 * @link_speed: the speed to convert
49 * return the speed as direct value of Mbps.
52 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed
)
55 case I40E_LINK_SPEED_100MB
:
57 case I40E_LINK_SPEED_1GB
:
59 case I40E_LINK_SPEED_2_5GB
:
61 case I40E_LINK_SPEED_5GB
:
63 case I40E_LINK_SPEED_10GB
:
65 case I40E_LINK_SPEED_20GB
:
67 case I40E_LINK_SPEED_25GB
:
69 case I40E_LINK_SPEED_40GB
:
71 case I40E_LINK_SPEED_UNKNOWN
:
78 * i40e_set_vf_link_state
79 * @vf: pointer to the VF structure
80 * @pfe: pointer to PF event structure
81 * @ls: pointer to link status structure
83 * set a link state on a single vf
85 static void i40e_set_vf_link_state(struct i40e_vf
*vf
,
86 struct virtchnl_pf_event
*pfe
, struct i40e_link_status
*ls
)
88 u8 link_status
= ls
->link_info
& I40E_AQ_LINK_UP
;
91 link_status
= vf
->link_up
;
93 if (vf
->driver_caps
& VIRTCHNL_VF_CAP_ADV_LINK_SPEED
) {
94 pfe
->event_data
.link_event_adv
.link_speed
= link_status
?
95 i40e_vc_link_speed2mbps(ls
->link_speed
) : 0;
96 pfe
->event_data
.link_event_adv
.link_status
= link_status
;
98 pfe
->event_data
.link_event
.link_speed
= link_status
?
99 i40e_virtchnl_link_speed(ls
->link_speed
) : 0;
100 pfe
->event_data
.link_event
.link_status
= link_status
;
105 * i40e_vc_notify_vf_link_state
106 * @vf: pointer to the VF structure
108 * send a link status message to a single VF
110 static void i40e_vc_notify_vf_link_state(struct i40e_vf
*vf
)
112 struct virtchnl_pf_event pfe
;
113 struct i40e_pf
*pf
= vf
->pf
;
114 struct i40e_hw
*hw
= &pf
->hw
;
115 struct i40e_link_status
*ls
= &pf
->hw
.phy
.link_info
;
116 int abs_vf_id
= vf
->vf_id
+ (int)hw
->func_caps
.vf_base_id
;
118 pfe
.event
= VIRTCHNL_EVENT_LINK_CHANGE
;
119 pfe
.severity
= PF_EVENT_SEVERITY_INFO
;
121 i40e_set_vf_link_state(vf
, &pfe
, ls
);
123 i40e_aq_send_msg_to_vf(hw
, abs_vf_id
, VIRTCHNL_OP_EVENT
,
124 0, (u8
*)&pfe
, sizeof(pfe
), NULL
);
128 * i40e_vc_notify_link_state
129 * @pf: pointer to the PF structure
131 * send a link status message to all VFs on a given PF
133 void i40e_vc_notify_link_state(struct i40e_pf
*pf
)
137 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++)
138 i40e_vc_notify_vf_link_state(&pf
->vf
[i
]);
142 * i40e_vc_notify_reset
143 * @pf: pointer to the PF structure
145 * indicate a pending reset to all VFs on a given PF
147 void i40e_vc_notify_reset(struct i40e_pf
*pf
)
149 struct virtchnl_pf_event pfe
;
151 pfe
.event
= VIRTCHNL_EVENT_RESET_IMPENDING
;
152 pfe
.severity
= PF_EVENT_SEVERITY_CERTAIN_DOOM
;
153 i40e_vc_vf_broadcast(pf
, VIRTCHNL_OP_EVENT
, 0,
154 (u8
*)&pfe
, sizeof(struct virtchnl_pf_event
));
157 #ifdef CONFIG_PCI_IOV
158 void i40e_restore_all_vfs_msi_state(struct pci_dev
*pdev
)
163 /* Continue only if this is a PF */
164 if (!pdev
->is_physfn
)
167 if (!pci_num_vf(pdev
))
170 pos
= pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_SRIOV
);
172 struct pci_dev
*vf_dev
= NULL
;
174 pci_read_config_word(pdev
, pos
+ PCI_SRIOV_VF_DID
, &vf_id
);
175 while ((vf_dev
= pci_get_device(pdev
->vendor
, vf_id
, vf_dev
))) {
176 if (vf_dev
->is_virtfn
&& vf_dev
->physfn
== pdev
)
177 pci_restore_msi_state(vf_dev
);
181 #endif /* CONFIG_PCI_IOV */
184 * i40e_vc_notify_vf_reset
185 * @vf: pointer to the VF structure
187 * indicate a pending reset to the given VF
189 void i40e_vc_notify_vf_reset(struct i40e_vf
*vf
)
191 struct virtchnl_pf_event pfe
;
194 /* validate the request */
195 if (!vf
|| vf
->vf_id
>= vf
->pf
->num_alloc_vfs
)
198 /* verify if the VF is in either init or active before proceeding */
199 if (!test_bit(I40E_VF_STATE_INIT
, &vf
->vf_states
) &&
200 !test_bit(I40E_VF_STATE_ACTIVE
, &vf
->vf_states
))
203 abs_vf_id
= vf
->vf_id
+ (int)vf
->pf
->hw
.func_caps
.vf_base_id
;
205 pfe
.event
= VIRTCHNL_EVENT_RESET_IMPENDING
;
206 pfe
.severity
= PF_EVENT_SEVERITY_CERTAIN_DOOM
;
207 i40e_aq_send_msg_to_vf(&vf
->pf
->hw
, abs_vf_id
, VIRTCHNL_OP_EVENT
,
209 sizeof(struct virtchnl_pf_event
), NULL
);
211 /***********************misc routines*****************************/
215 * @vf: pointer to the VF info
216 * @notify_vf: notify vf about reset or not
219 static void i40e_vc_reset_vf(struct i40e_vf
*vf
, bool notify_vf
)
221 struct i40e_pf
*pf
= vf
->pf
;
225 i40e_vc_notify_vf_reset(vf
);
227 /* We want to ensure that an actual reset occurs initiated after this
228 * function was called. However, we do not want to wait forever, so
229 * we'll give a reasonable time and print a message if we failed to
232 for (i
= 0; i
< 20; i
++) {
233 /* If PF is in VFs releasing state reset VF is impossible,
236 if (test_bit(__I40E_VFS_RELEASING
, pf
->state
))
238 if (i40e_reset_vf(vf
, false))
240 usleep_range(10000, 20000);
244 dev_warn(&vf
->pf
->pdev
->dev
,
245 "Failed to initiate reset for VF %d after 200 milliseconds\n",
248 dev_dbg(&vf
->pf
->pdev
->dev
,
249 "Failed to initiate reset for VF %d after 200 milliseconds\n",
254 * i40e_vc_isvalid_vsi_id
255 * @vf: pointer to the VF info
256 * @vsi_id: VF relative VSI id
258 * check for the valid VSI id
260 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf
*vf
, u16 vsi_id
)
262 struct i40e_pf
*pf
= vf
->pf
;
263 struct i40e_vsi
*vsi
= i40e_find_vsi_from_id(pf
, vsi_id
);
265 return (vsi
&& (vsi
->vf_id
== vf
->vf_id
));
269 * i40e_vc_isvalid_queue_id
270 * @vf: pointer to the VF info
272 * @qid: vsi relative queue id
274 * check for the valid queue id
276 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf
*vf
, u16 vsi_id
,
279 struct i40e_pf
*pf
= vf
->pf
;
280 struct i40e_vsi
*vsi
= i40e_find_vsi_from_id(pf
, vsi_id
);
282 return (vsi
&& (qid
< vsi
->alloc_queue_pairs
));
286 * i40e_vc_isvalid_vector_id
287 * @vf: pointer to the VF info
288 * @vector_id: VF relative vector id
290 * check for the valid vector id
292 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf
*vf
, u32 vector_id
)
294 struct i40e_pf
*pf
= vf
->pf
;
296 return vector_id
< pf
->hw
.func_caps
.num_msix_vectors_vf
;
299 /***********************vf resource mgmt routines*****************/
302 * i40e_vc_get_pf_queue_id
303 * @vf: pointer to the VF info
304 * @vsi_id: id of VSI as provided by the FW
305 * @vsi_queue_id: vsi relative queue id
307 * return PF relative queue id
309 static u16
i40e_vc_get_pf_queue_id(struct i40e_vf
*vf
, u16 vsi_id
,
312 struct i40e_pf
*pf
= vf
->pf
;
313 struct i40e_vsi
*vsi
= i40e_find_vsi_from_id(pf
, vsi_id
);
314 u16 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
319 if (le16_to_cpu(vsi
->info
.mapping_flags
) &
320 I40E_AQ_VSI_QUE_MAP_NONCONTIG
)
322 le16_to_cpu(vsi
->info
.queue_mapping
[vsi_queue_id
]);
324 pf_queue_id
= le16_to_cpu(vsi
->info
.queue_mapping
[0]) +
331 * i40e_get_real_pf_qid
332 * @vf: pointer to the VF info
334 * @queue_id: queue number
336 * wrapper function to get pf_queue_id handling ADq code as well
338 static u16
i40e_get_real_pf_qid(struct i40e_vf
*vf
, u16 vsi_id
, u16 queue_id
)
342 if (vf
->adq_enabled
) {
343 /* Although VF considers all the queues(can be 1 to 16) as its
344 * own but they may actually belong to different VSIs(up to 4).
345 * We need to find which queues belongs to which VSI.
347 for (i
= 0; i
< vf
->num_tc
; i
++) {
348 if (queue_id
< vf
->ch
[i
].num_qps
) {
349 vsi_id
= vf
->ch
[i
].vsi_id
;
352 /* find right queue id which is relative to a
355 queue_id
-= vf
->ch
[i
].num_qps
;
359 return i40e_vc_get_pf_queue_id(vf
, vsi_id
, queue_id
);
363 * i40e_config_irq_link_list
364 * @vf: pointer to the VF info
365 * @vsi_id: id of VSI as given by the FW
366 * @vecmap: irq map info
368 * configure irq link list from the map
370 static void i40e_config_irq_link_list(struct i40e_vf
*vf
, u16 vsi_id
,
371 struct virtchnl_vector_map
*vecmap
)
373 unsigned long linklistmap
= 0, tempmap
;
374 struct i40e_pf
*pf
= vf
->pf
;
375 struct i40e_hw
*hw
= &pf
->hw
;
376 u16 vsi_queue_id
, pf_queue_id
;
377 enum i40e_queue_type qtype
;
378 u16 next_q
, vector_id
, size
;
382 vector_id
= vecmap
->vector_id
;
385 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
387 reg_idx
= I40E_VPINT_LNKLSTN(
388 ((pf
->hw
.func_caps
.num_msix_vectors_vf
- 1) * vf
->vf_id
) +
391 if (vecmap
->rxq_map
== 0 && vecmap
->txq_map
== 0) {
392 /* Special case - No queues mapped on this vector */
393 wr32(hw
, reg_idx
, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK
);
396 tempmap
= vecmap
->rxq_map
;
397 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
398 linklistmap
|= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES
*
402 tempmap
= vecmap
->txq_map
;
403 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
404 linklistmap
|= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES
*
408 size
= I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
;
409 next_q
= find_first_bit(&linklistmap
, size
);
410 if (unlikely(next_q
== size
))
413 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
414 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
415 pf_queue_id
= i40e_get_real_pf_qid(vf
, vsi_id
, vsi_queue_id
);
416 reg
= ((qtype
<< I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
) | pf_queue_id
);
418 wr32(hw
, reg_idx
, reg
);
420 while (next_q
< size
) {
422 case I40E_QUEUE_TYPE_RX
:
423 reg_idx
= I40E_QINT_RQCTL(pf_queue_id
);
424 itr_idx
= vecmap
->rxitr_idx
;
426 case I40E_QUEUE_TYPE_TX
:
427 reg_idx
= I40E_QINT_TQCTL(pf_queue_id
);
428 itr_idx
= vecmap
->txitr_idx
;
434 next_q
= find_next_bit(&linklistmap
, size
, next_q
+ 1);
436 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
437 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
438 pf_queue_id
= i40e_get_real_pf_qid(vf
,
442 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
446 /* format for the RQCTL & TQCTL regs is same */
448 (qtype
<< I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT
) |
449 (pf_queue_id
<< I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT
) |
450 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT
) |
451 (itr_idx
<< I40E_QINT_RQCTL_ITR_INDX_SHIFT
);
452 wr32(hw
, reg_idx
, reg
);
455 /* if the vf is running in polling mode and using interrupt zero,
456 * need to disable auto-mask on enabling zero interrupt for VFs.
458 if ((vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RX_POLLING
) &&
460 reg
= rd32(hw
, I40E_GLINT_CTL
);
461 if (!(reg
& I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK
)) {
462 reg
|= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK
;
463 wr32(hw
, I40E_GLINT_CTL
, reg
);
472 * i40e_release_rdma_qvlist
473 * @vf: pointer to the VF.
476 static void i40e_release_rdma_qvlist(struct i40e_vf
*vf
)
478 struct i40e_pf
*pf
= vf
->pf
;
479 struct virtchnl_rdma_qvlist_info
*qvlist_info
= vf
->qvlist_info
;
483 if (!vf
->qvlist_info
)
486 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
487 for (i
= 0; i
< qvlist_info
->num_vectors
; i
++) {
488 struct virtchnl_rdma_qv_info
*qv_info
;
489 u32 next_q_index
, next_q_type
;
490 struct i40e_hw
*hw
= &pf
->hw
;
491 u32 v_idx
, reg_idx
, reg
;
493 qv_info
= &qvlist_info
->qv_info
[i
];
494 v_idx
= qv_info
->v_idx
;
495 if (qv_info
->ceq_idx
!= I40E_QUEUE_INVALID_IDX
) {
496 /* Figure out the queue after CEQ and make that the
499 reg_idx
= (msix_vf
- 1) * vf
->vf_id
+ qv_info
->ceq_idx
;
500 reg
= rd32(hw
, I40E_VPINT_CEQCTL(reg_idx
));
501 next_q_index
= FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK
,
503 next_q_type
= FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK
,
506 reg_idx
= ((msix_vf
- 1) * vf
->vf_id
) + (v_idx
- 1);
507 reg
= (next_q_index
&
508 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
) |
510 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
);
512 wr32(hw
, I40E_VPINT_LNKLSTN(reg_idx
), reg
);
515 kfree(vf
->qvlist_info
);
516 vf
->qvlist_info
= NULL
;
520 * i40e_config_rdma_qvlist
521 * @vf: pointer to the VF info
522 * @qvlist_info: queue and vector list
524 * Return 0 on success or < 0 on error
527 i40e_config_rdma_qvlist(struct i40e_vf
*vf
,
528 struct virtchnl_rdma_qvlist_info
*qvlist_info
)
530 struct i40e_pf
*pf
= vf
->pf
;
531 struct i40e_hw
*hw
= &pf
->hw
;
532 struct virtchnl_rdma_qv_info
*qv_info
;
533 u32 v_idx
, i
, reg_idx
, reg
;
534 u32 next_q_idx
, next_q_type
;
539 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
541 if (qvlist_info
->num_vectors
> msix_vf
) {
542 dev_warn(&pf
->pdev
->dev
,
543 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
544 qvlist_info
->num_vectors
,
550 kfree(vf
->qvlist_info
);
551 size
= virtchnl_struct_size(vf
->qvlist_info
, qv_info
,
552 qvlist_info
->num_vectors
);
553 vf
->qvlist_info
= kzalloc(size
, GFP_KERNEL
);
554 if (!vf
->qvlist_info
) {
558 vf
->qvlist_info
->num_vectors
= qvlist_info
->num_vectors
;
560 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
561 for (i
= 0; i
< qvlist_info
->num_vectors
; i
++) {
562 qv_info
= &qvlist_info
->qv_info
[i
];
564 /* Validate vector id belongs to this vf */
565 if (!i40e_vc_isvalid_vector_id(vf
, qv_info
->v_idx
)) {
570 v_idx
= qv_info
->v_idx
;
572 vf
->qvlist_info
->qv_info
[i
] = *qv_info
;
574 reg_idx
= ((msix_vf
- 1) * vf
->vf_id
) + (v_idx
- 1);
575 /* We might be sharing the interrupt, so get the first queue
576 * index and type, push it down the list by adding the new
577 * queue on top. Also link it with the new queue in CEQCTL.
579 reg
= rd32(hw
, I40E_VPINT_LNKLSTN(reg_idx
));
580 next_q_idx
= FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
,
582 next_q_type
= FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
,
585 if (qv_info
->ceq_idx
!= I40E_QUEUE_INVALID_IDX
) {
586 reg_idx
= (msix_vf
- 1) * vf
->vf_id
+ qv_info
->ceq_idx
;
587 reg
= (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK
|
588 (v_idx
<< I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT
) |
589 (qv_info
->itr_idx
<< I40E_VPINT_CEQCTL_ITR_INDX_SHIFT
) |
590 (next_q_type
<< I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT
) |
591 (next_q_idx
<< I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT
));
592 wr32(hw
, I40E_VPINT_CEQCTL(reg_idx
), reg
);
594 reg_idx
= ((msix_vf
- 1) * vf
->vf_id
) + (v_idx
- 1);
595 reg
= (qv_info
->ceq_idx
&
596 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
) |
597 (I40E_QUEUE_TYPE_PE_CEQ
<<
598 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
);
599 wr32(hw
, I40E_VPINT_LNKLSTN(reg_idx
), reg
);
602 if (qv_info
->aeq_idx
!= I40E_QUEUE_INVALID_IDX
) {
603 reg
= (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK
|
604 (v_idx
<< I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT
) |
605 (qv_info
->itr_idx
<< I40E_VPINT_AEQCTL_ITR_INDX_SHIFT
));
607 wr32(hw
, I40E_VPINT_AEQCTL(vf
->vf_id
), reg
);
613 kfree(vf
->qvlist_info
);
614 vf
->qvlist_info
= NULL
;
620 * i40e_config_vsi_tx_queue
621 * @vf: pointer to the VF info
622 * @vsi_id: id of VSI as provided by the FW
623 * @vsi_queue_id: vsi relative queue index
624 * @info: config. info
628 static int i40e_config_vsi_tx_queue(struct i40e_vf
*vf
, u16 vsi_id
,
630 struct virtchnl_txq_info
*info
)
632 struct i40e_pf
*pf
= vf
->pf
;
633 struct i40e_hw
*hw
= &pf
->hw
;
634 struct i40e_hmc_obj_txq tx_ctx
;
635 struct i40e_vsi
*vsi
;
640 if (!i40e_vc_isvalid_vsi_id(vf
, info
->vsi_id
)) {
644 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_id
, vsi_queue_id
);
645 vsi
= i40e_find_vsi_from_id(pf
, vsi_id
);
651 /* clear the context structure first */
652 memset(&tx_ctx
, 0, sizeof(struct i40e_hmc_obj_txq
));
654 /* only set the required fields */
655 tx_ctx
.base
= info
->dma_ring_addr
/ 128;
656 tx_ctx
.qlen
= info
->ring_len
;
657 tx_ctx
.rdylist
= le16_to_cpu(vsi
->info
.qs_handle
[0]);
658 tx_ctx
.rdylist_act
= 0;
659 tx_ctx
.head_wb_ena
= info
->headwb_enabled
;
660 tx_ctx
.head_wb_addr
= info
->dma_headwb_addr
;
662 /* clear the context in the HMC */
663 ret
= i40e_clear_lan_tx_queue_context(hw
, pf_queue_id
);
665 dev_err(&pf
->pdev
->dev
,
666 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
672 /* set the context in the HMC */
673 ret
= i40e_set_lan_tx_queue_context(hw
, pf_queue_id
, &tx_ctx
);
675 dev_err(&pf
->pdev
->dev
,
676 "Failed to set VF LAN Tx queue context %d error: %d\n",
682 /* associate this queue with the PCI VF function */
683 qtx_ctl
= I40E_QTX_CTL_VF_QUEUE
;
684 qtx_ctl
|= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK
, hw
->pf_id
);
685 qtx_ctl
|= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK
,
686 vf
->vf_id
+ hw
->func_caps
.vf_base_id
);
687 wr32(hw
, I40E_QTX_CTL(pf_queue_id
), qtx_ctl
);
695 * i40e_config_vsi_rx_queue
696 * @vf: pointer to the VF info
697 * @vsi_id: id of VSI as provided by the FW
698 * @vsi_queue_id: vsi relative queue index
699 * @info: config. info
703 static int i40e_config_vsi_rx_queue(struct i40e_vf
*vf
, u16 vsi_id
,
705 struct virtchnl_rxq_info
*info
)
707 u16 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_id
, vsi_queue_id
);
708 struct i40e_pf
*pf
= vf
->pf
;
709 struct i40e_vsi
*vsi
= pf
->vsi
[vf
->lan_vsi_idx
];
710 struct i40e_hw
*hw
= &pf
->hw
;
711 struct i40e_hmc_obj_rxq rx_ctx
;
714 /* clear the context structure first */
715 memset(&rx_ctx
, 0, sizeof(struct i40e_hmc_obj_rxq
));
717 /* only set the required fields */
718 rx_ctx
.base
= info
->dma_ring_addr
/ 128;
719 rx_ctx
.qlen
= info
->ring_len
;
721 if (info
->splithdr_enabled
) {
722 rx_ctx
.hsplit_0
= I40E_RX_SPLIT_L2
|
724 I40E_RX_SPLIT_TCP_UDP
|
726 /* header length validation */
727 if (info
->hdr_size
> ((2 * 1024) - 64)) {
731 rx_ctx
.hbuff
= info
->hdr_size
>> I40E_RXQ_CTX_HBUFF_SHIFT
;
733 /* set split mode 10b */
734 rx_ctx
.dtype
= I40E_RX_DTYPE_HEADER_SPLIT
;
737 /* databuffer length validation */
738 if (info
->databuffer_size
> ((16 * 1024) - 128)) {
742 rx_ctx
.dbuff
= info
->databuffer_size
>> I40E_RXQ_CTX_DBUFF_SHIFT
;
744 /* max pkt. length validation */
745 if (info
->max_pkt_size
>= (16 * 1024) || info
->max_pkt_size
< 64) {
749 rx_ctx
.rxmax
= info
->max_pkt_size
;
751 /* if port VLAN is configured increase the max packet size */
753 rx_ctx
.rxmax
+= VLAN_HLEN
;
755 /* enable 32bytes desc always */
759 rx_ctx
.lrxqthresh
= 1;
764 /* clear the context in the HMC */
765 ret
= i40e_clear_lan_rx_queue_context(hw
, pf_queue_id
);
767 dev_err(&pf
->pdev
->dev
,
768 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
774 /* set the context in the HMC */
775 ret
= i40e_set_lan_rx_queue_context(hw
, pf_queue_id
, &rx_ctx
);
777 dev_err(&pf
->pdev
->dev
,
778 "Failed to set VF LAN Rx queue context %d error: %d\n",
790 * @vf: pointer to the VF info
791 * @idx: VSI index, applies only for ADq mode, zero otherwise
793 * alloc VF vsi context & resources
795 static int i40e_alloc_vsi_res(struct i40e_vf
*vf
, u8 idx
)
797 struct i40e_mac_filter
*f
= NULL
;
798 struct i40e_vsi
*main_vsi
, *vsi
;
799 struct i40e_pf
*pf
= vf
->pf
;
803 main_vsi
= i40e_pf_get_main_vsi(pf
);
804 vsi
= i40e_vsi_setup(pf
, I40E_VSI_SRIOV
, main_vsi
->seid
, vf
->vf_id
);
807 dev_err(&pf
->pdev
->dev
,
808 "add vsi failed for VF %d, aq_err %d\n",
809 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
811 goto error_alloc_vsi_res
;
815 u64 hena
= i40e_pf_get_default_rss_hena(pf
);
816 u8 broadcast
[ETH_ALEN
];
818 vf
->lan_vsi_idx
= vsi
->idx
;
819 vf
->lan_vsi_id
= vsi
->id
;
820 /* If the port VLAN has been configured and then the
821 * VF driver was removed then the VSI port VLAN
822 * configuration was destroyed. Check if there is
823 * a port VLAN and restore the VSI configuration if
826 if (vf
->port_vlan_id
)
827 i40e_vsi_add_pvid(vsi
, vf
->port_vlan_id
);
829 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
830 if (is_valid_ether_addr(vf
->default_lan_addr
.addr
)) {
831 f
= i40e_add_mac_filter(vsi
,
832 vf
->default_lan_addr
.addr
);
834 dev_info(&pf
->pdev
->dev
,
835 "Could not add MAC filter %pM for VF %d\n",
836 vf
->default_lan_addr
.addr
, vf
->vf_id
);
838 eth_broadcast_addr(broadcast
);
839 f
= i40e_add_mac_filter(vsi
, broadcast
);
841 dev_info(&pf
->pdev
->dev
,
842 "Could not allocate VF broadcast filter\n");
843 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
844 wr32(&pf
->hw
, I40E_VFQF_HENA1(0, vf
->vf_id
), (u32
)hena
);
845 wr32(&pf
->hw
, I40E_VFQF_HENA1(1, vf
->vf_id
), (u32
)(hena
>> 32));
846 /* program mac filter only for VF VSI */
847 ret
= i40e_sync_vsi_filters(vsi
);
849 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
852 /* storing VSI index and id for ADq and don't apply the mac filter */
853 if (vf
->adq_enabled
) {
854 vf
->ch
[idx
].vsi_idx
= vsi
->idx
;
855 vf
->ch
[idx
].vsi_id
= vsi
->id
;
858 /* Set VF bandwidth if specified */
860 max_tx_rate
= vf
->tx_rate
;
861 } else if (vf
->ch
[idx
].max_tx_rate
) {
862 max_tx_rate
= vf
->ch
[idx
].max_tx_rate
;
866 max_tx_rate
= div_u64(max_tx_rate
, I40E_BW_CREDIT_DIVISOR
);
867 ret
= i40e_aq_config_vsi_bw_limit(&pf
->hw
, vsi
->seid
,
868 max_tx_rate
, 0, NULL
);
870 dev_err(&pf
->pdev
->dev
, "Unable to set tx rate, VF %d, error code %d.\n",
879 * i40e_map_pf_queues_to_vsi
880 * @vf: pointer to the VF info
882 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
883 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
885 static void i40e_map_pf_queues_to_vsi(struct i40e_vf
*vf
)
887 struct i40e_pf
*pf
= vf
->pf
;
888 struct i40e_hw
*hw
= &pf
->hw
;
889 u32 reg
, num_tc
= 1; /* VF has at least one traffic class */
896 for (i
= 0; i
< num_tc
; i
++) {
897 if (vf
->adq_enabled
) {
898 qps
= vf
->ch
[i
].num_qps
;
899 vsi_id
= vf
->ch
[i
].vsi_id
;
901 qps
= pf
->vsi
[vf
->lan_vsi_idx
]->alloc_queue_pairs
;
902 vsi_id
= vf
->lan_vsi_id
;
905 for (j
= 0; j
< 7; j
++) {
910 u16 qid
= i40e_vc_get_pf_queue_id(vf
,
914 qid
= i40e_vc_get_pf_queue_id(vf
, vsi_id
,
918 i40e_write_rx_ctl(hw
,
919 I40E_VSILAN_QTABLE(j
, vsi_id
),
926 * i40e_map_pf_to_vf_queues
927 * @vf: pointer to the VF info
929 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
930 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
932 static void i40e_map_pf_to_vf_queues(struct i40e_vf
*vf
)
934 struct i40e_pf
*pf
= vf
->pf
;
935 struct i40e_hw
*hw
= &pf
->hw
;
936 u32 reg
, total_qps
= 0;
937 u32 qps
, num_tc
= 1; /* VF has at least one traffic class */
944 for (i
= 0; i
< num_tc
; i
++) {
945 if (vf
->adq_enabled
) {
946 qps
= vf
->ch
[i
].num_qps
;
947 vsi_id
= vf
->ch
[i
].vsi_id
;
949 qps
= pf
->vsi
[vf
->lan_vsi_idx
]->alloc_queue_pairs
;
950 vsi_id
= vf
->lan_vsi_id
;
953 for (j
= 0; j
< qps
; j
++) {
954 qid
= i40e_vc_get_pf_queue_id(vf
, vsi_id
, j
);
956 reg
= (qid
& I40E_VPLAN_QTABLE_QINDEX_MASK
);
957 wr32(hw
, I40E_VPLAN_QTABLE(total_qps
, vf
->vf_id
),
965 * i40e_enable_vf_mappings
966 * @vf: pointer to the VF info
970 static void i40e_enable_vf_mappings(struct i40e_vf
*vf
)
972 struct i40e_pf
*pf
= vf
->pf
;
973 struct i40e_hw
*hw
= &pf
->hw
;
976 /* Tell the hardware we're using noncontiguous mapping. HW requires
977 * that VF queues be mapped using this method, even when they are
978 * contiguous in real life
980 i40e_write_rx_ctl(hw
, I40E_VSILAN_QBASE(vf
->lan_vsi_id
),
981 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK
);
983 /* enable VF vplan_qtable mappings */
984 reg
= I40E_VPLAN_MAPENA_TXRX_ENA_MASK
;
985 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), reg
);
987 i40e_map_pf_to_vf_queues(vf
);
988 i40e_map_pf_queues_to_vsi(vf
);
994 * i40e_disable_vf_mappings
995 * @vf: pointer to the VF info
997 * disable VF mappings
999 static void i40e_disable_vf_mappings(struct i40e_vf
*vf
)
1001 struct i40e_pf
*pf
= vf
->pf
;
1002 struct i40e_hw
*hw
= &pf
->hw
;
1005 /* disable qp mappings */
1006 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), 0);
1007 for (i
= 0; i
< I40E_MAX_VSI_QP
; i
++)
1008 wr32(hw
, I40E_VPLAN_QTABLE(i
, vf
->vf_id
),
1009 I40E_QUEUE_END_OF_LIST
);
1015 * @vf: pointer to the VF info
1019 static void i40e_free_vf_res(struct i40e_vf
*vf
)
1021 struct i40e_pf
*pf
= vf
->pf
;
1022 struct i40e_hw
*hw
= &pf
->hw
;
1026 /* Start by disabling VF's configuration API to prevent the OS from
1027 * accessing the VF's VSI after it's freed / invalidated.
1029 clear_bit(I40E_VF_STATE_INIT
, &vf
->vf_states
);
1031 /* It's possible the VF had requeuested more queues than the default so
1032 * do the accounting here when we're about to free them.
1034 if (vf
->num_queue_pairs
> I40E_DEFAULT_QUEUES_PER_VF
) {
1035 pf
->queues_left
+= vf
->num_queue_pairs
-
1036 I40E_DEFAULT_QUEUES_PER_VF
;
1039 /* free vsi & disconnect it from the parent uplink */
1040 if (vf
->lan_vsi_idx
) {
1041 i40e_vsi_release(pf
->vsi
[vf
->lan_vsi_idx
]);
1042 vf
->lan_vsi_idx
= 0;
1046 /* do the accounting and remove additional ADq VSI's */
1047 if (vf
->adq_enabled
&& vf
->ch
[0].vsi_idx
) {
1048 for (j
= 0; j
< vf
->num_tc
; j
++) {
1049 /* At this point VSI0 is already released so don't
1050 * release it again and only clear their values in
1051 * structure variables
1054 i40e_vsi_release(pf
->vsi
[vf
->ch
[j
].vsi_idx
]);
1055 vf
->ch
[j
].vsi_idx
= 0;
1056 vf
->ch
[j
].vsi_id
= 0;
1059 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
1061 /* disable interrupts so the VF starts in a known state */
1062 for (i
= 0; i
< msix_vf
; i
++) {
1063 /* format is same for both registers */
1065 reg_idx
= I40E_VFINT_DYN_CTL0(vf
->vf_id
);
1067 reg_idx
= I40E_VFINT_DYN_CTLN(((msix_vf
- 1) *
1070 wr32(hw
, reg_idx
, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
1074 /* clear the irq settings */
1075 for (i
= 0; i
< msix_vf
; i
++) {
1076 /* format is same for both registers */
1078 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
1080 reg_idx
= I40E_VPINT_LNKLSTN(((msix_vf
- 1) *
1083 reg
= (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
|
1084 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
);
1085 wr32(hw
, reg_idx
, reg
);
1088 /* reset some of the state variables keeping track of the resources */
1089 vf
->num_queue_pairs
= 0;
1090 clear_bit(I40E_VF_STATE_MC_PROMISC
, &vf
->vf_states
);
1091 clear_bit(I40E_VF_STATE_UC_PROMISC
, &vf
->vf_states
);
1096 * @vf: pointer to the VF info
1098 * allocate VF resources
1100 static int i40e_alloc_vf_res(struct i40e_vf
*vf
)
1102 struct i40e_pf
*pf
= vf
->pf
;
1103 int total_queue_pairs
= 0;
1106 if (vf
->num_req_queues
&&
1107 vf
->num_req_queues
<= pf
->queues_left
+ I40E_DEFAULT_QUEUES_PER_VF
)
1108 pf
->num_vf_qps
= vf
->num_req_queues
;
1110 pf
->num_vf_qps
= I40E_DEFAULT_QUEUES_PER_VF
;
1112 /* allocate hw vsi context & associated resources */
1113 ret
= i40e_alloc_vsi_res(vf
, 0);
1116 total_queue_pairs
+= pf
->vsi
[vf
->lan_vsi_idx
]->alloc_queue_pairs
;
1118 /* allocate additional VSIs based on tc information for ADq */
1119 if (vf
->adq_enabled
) {
1120 if (pf
->queues_left
>=
1121 (I40E_MAX_VF_QUEUES
- I40E_DEFAULT_QUEUES_PER_VF
)) {
1122 /* TC 0 always belongs to VF VSI */
1123 for (idx
= 1; idx
< vf
->num_tc
; idx
++) {
1124 ret
= i40e_alloc_vsi_res(vf
, idx
);
1128 /* send correct number of queues */
1129 total_queue_pairs
= I40E_MAX_VF_QUEUES
;
1131 dev_info(&pf
->pdev
->dev
, "VF %d: Not enough queues to allocate, disabling ADq\n",
1133 vf
->adq_enabled
= false;
1137 /* We account for each VF to get a default number of queue pairs. If
1138 * the VF has now requested more, we need to account for that to make
1139 * certain we never request more queues than we actually have left in
1142 if (total_queue_pairs
> I40E_DEFAULT_QUEUES_PER_VF
)
1144 total_queue_pairs
- I40E_DEFAULT_QUEUES_PER_VF
;
1147 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
1149 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
1151 /* store the total qps number for the runtime
1154 vf
->num_queue_pairs
= total_queue_pairs
;
1156 /* VF is now completely initialized */
1157 set_bit(I40E_VF_STATE_INIT
, &vf
->vf_states
);
1161 i40e_free_vf_res(vf
);
1166 #define VF_DEVICE_STATUS 0xAA
1167 #define VF_TRANS_PENDING_MASK 0x20
1169 * i40e_quiesce_vf_pci
1170 * @vf: pointer to the VF structure
1172 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1173 * if the transactions never clear.
1175 static int i40e_quiesce_vf_pci(struct i40e_vf
*vf
)
1177 struct i40e_pf
*pf
= vf
->pf
;
1178 struct i40e_hw
*hw
= &pf
->hw
;
1182 vf_abs_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
1184 wr32(hw
, I40E_PF_PCI_CIAA
,
1185 VF_DEVICE_STATUS
| (vf_abs_id
<< I40E_PF_PCI_CIAA_VF_NUM_SHIFT
));
1186 for (i
= 0; i
< 100; i
++) {
1187 reg
= rd32(hw
, I40E_PF_PCI_CIAD
);
1188 if ((reg
& VF_TRANS_PENDING_MASK
) == 0)
1196 * __i40e_getnum_vf_vsi_vlan_filters
1197 * @vsi: pointer to the vsi
1199 * called to get the number of VLANs offloaded on this VF
1201 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi
*vsi
)
1203 struct i40e_mac_filter
*f
;
1204 u16 num_vlans
= 0, bkt
;
1206 hash_for_each(vsi
->mac_filter_hash
, bkt
, f
, hlist
) {
1207 if (f
->vlan
>= 0 && f
->vlan
<= I40E_MAX_VLANID
)
1215 * i40e_getnum_vf_vsi_vlan_filters
1216 * @vsi: pointer to the vsi
1218 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1220 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi
*vsi
)
1224 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
1225 num_vlans
= __i40e_getnum_vf_vsi_vlan_filters(vsi
);
1226 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
1232 * i40e_get_vlan_list_sync
1233 * @vsi: pointer to the VSI
1234 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1235 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1236 * This array is allocated here, but has to be freed in caller.
1238 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1240 static void i40e_get_vlan_list_sync(struct i40e_vsi
*vsi
, u16
*num_vlans
,
1243 struct i40e_mac_filter
*f
;
1247 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
1248 *num_vlans
= __i40e_getnum_vf_vsi_vlan_filters(vsi
);
1249 *vlan_list
= kcalloc(*num_vlans
, sizeof(**vlan_list
), GFP_ATOMIC
);
1253 hash_for_each(vsi
->mac_filter_hash
, bkt
, f
, hlist
) {
1254 if (f
->vlan
< 0 || f
->vlan
> I40E_MAX_VLANID
)
1256 (*vlan_list
)[i
++] = f
->vlan
;
1259 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
1263 * i40e_set_vsi_promisc
1264 * @vf: pointer to the VF struct
1266 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1268 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1270 * @vl: List of VLANs - apply filter for given VLANs
1271 * @num_vlans: Number of elements in @vl
1274 i40e_set_vsi_promisc(struct i40e_vf
*vf
, u16 seid
, bool multi_enable
,
1275 bool unicast_enable
, s16
*vl
, u16 num_vlans
)
1277 struct i40e_pf
*pf
= vf
->pf
;
1278 struct i40e_hw
*hw
= &pf
->hw
;
1279 int aq_ret
, aq_tmp
= 0;
1282 /* No VLAN to set promisc on, set on VSI */
1283 if (!num_vlans
|| !vl
) {
1284 aq_ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, seid
,
1288 int aq_err
= pf
->hw
.aq
.asq_last_status
;
1290 dev_err(&pf
->pdev
->dev
,
1291 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1294 i40e_aq_str(&pf
->hw
, aq_err
));
1299 aq_ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, seid
,
1304 int aq_err
= pf
->hw
.aq
.asq_last_status
;
1306 dev_err(&pf
->pdev
->dev
,
1307 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1310 i40e_aq_str(&pf
->hw
, aq_err
));
1316 for (i
= 0; i
< num_vlans
; i
++) {
1317 aq_ret
= i40e_aq_set_vsi_mc_promisc_on_vlan(hw
, seid
,
1321 int aq_err
= pf
->hw
.aq
.asq_last_status
;
1323 dev_err(&pf
->pdev
->dev
,
1324 "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1327 i40e_aq_str(&pf
->hw
, aq_err
));
1333 aq_ret
= i40e_aq_set_vsi_uc_promisc_on_vlan(hw
, seid
,
1337 int aq_err
= pf
->hw
.aq
.asq_last_status
;
1339 dev_err(&pf
->pdev
->dev
,
1340 "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1343 i40e_aq_str(&pf
->hw
, aq_err
));
1357 * i40e_config_vf_promiscuous_mode
1358 * @vf: pointer to the VF info
1360 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1361 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1363 * Called from the VF to configure the promiscuous mode of
1364 * VF vsis and from the VF reset path to reset promiscuous mode.
1366 static int i40e_config_vf_promiscuous_mode(struct i40e_vf
*vf
,
1371 struct i40e_pf
*pf
= vf
->pf
;
1372 struct i40e_vsi
*vsi
;
1377 vsi
= i40e_find_vsi_from_id(pf
, vsi_id
);
1378 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
) || !vsi
)
1381 if (vf
->port_vlan_id
) {
1382 aq_ret
= i40e_set_vsi_promisc(vf
, vsi
->seid
, allmulti
,
1383 alluni
, &vf
->port_vlan_id
, 1);
1385 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi
)) {
1386 i40e_get_vlan_list_sync(vsi
, &num_vlans
, &vl
);
1391 aq_ret
= i40e_set_vsi_promisc(vf
, vsi
->seid
, allmulti
, alluni
,
1397 /* no VLANs to set on, set on VSI */
1398 aq_ret
= i40e_set_vsi_promisc(vf
, vsi
->seid
, allmulti
, alluni
,
1404 * i40e_sync_vfr_reset
1405 * @hw: pointer to hw struct
1406 * @vf_id: VF identifier
1408 * Before trigger hardware reset, we need to know if no other process has
1409 * reserved the hardware for any reset operations. This check is done by
1410 * examining the status of the RSTAT1 register used to signal the reset.
1412 static int i40e_sync_vfr_reset(struct i40e_hw
*hw
, int vf_id
)
1417 for (i
= 0; i
< I40E_VFR_WAIT_COUNT
; i
++) {
1418 reg
= rd32(hw
, I40E_VFINT_ICR0_ENA(vf_id
)) &
1419 I40E_VFINT_ICR0_ADMINQ_MASK
;
1423 usleep_range(100, 200);
1430 * i40e_trigger_vf_reset
1431 * @vf: pointer to the VF structure
1432 * @flr: VFLR was issued or not
1434 * Trigger hardware to start a reset for a particular VF. Expects the caller
1435 * to wait the proper amount of time to allow hardware to reset the VF before
1436 * it cleans up and restores VF functionality.
1438 static void i40e_trigger_vf_reset(struct i40e_vf
*vf
, bool flr
)
1440 struct i40e_pf
*pf
= vf
->pf
;
1441 struct i40e_hw
*hw
= &pf
->hw
;
1442 u32 reg
, reg_idx
, bit_idx
;
1447 vf_active
= test_and_clear_bit(I40E_VF_STATE_ACTIVE
, &vf
->vf_states
);
1449 /* Disable VF's configuration API during reset. The flag is re-enabled
1450 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1451 * It's normally disabled in i40e_free_vf_res(), but it's safer
1452 * to do it earlier to give some time to finish to any VF config
1453 * functions that may still be running at this point.
1455 clear_bit(I40E_VF_STATE_INIT
, &vf
->vf_states
);
1457 /* In the case of a VFLR, the HW has already reset the VF and we
1458 * just need to clean up, so don't hit the VFRTRIG register.
1461 /* Sync VFR reset before trigger next one */
1462 radq
= rd32(hw
, I40E_VFINT_ICR0_ENA(vf
->vf_id
)) &
1463 I40E_VFINT_ICR0_ADMINQ_MASK
;
1464 if (vf_active
&& !radq
)
1465 /* waiting for finish reset by virtual driver */
1466 if (i40e_sync_vfr_reset(hw
, vf
->vf_id
))
1467 dev_info(&pf
->pdev
->dev
,
1468 "Reset VF %d never finished\n",
1471 /* Reset VF using VPGEN_VFRTRIG reg. It is also setting
1472 * in progress state in rstat1 register.
1474 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
1475 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
1476 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
1479 /* clear the VFLR bit in GLGEN_VFLRSTAT */
1480 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf
->vf_id
) / 32;
1481 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf
->vf_id
) % 32;
1482 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), BIT(bit_idx
));
1485 if (i40e_quiesce_vf_pci(vf
))
1486 dev_err(&pf
->pdev
->dev
, "VF %d PCI transactions stuck\n",
1491 * i40e_cleanup_reset_vf
1492 * @vf: pointer to the VF structure
1494 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1495 * have verified whether the reset is finished properly, and ensure the
1496 * minimum amount of wait time has passed.
1498 static void i40e_cleanup_reset_vf(struct i40e_vf
*vf
)
1500 struct i40e_pf
*pf
= vf
->pf
;
1501 struct i40e_hw
*hw
= &pf
->hw
;
1504 /* disable promisc modes in case they were enabled */
1505 i40e_config_vf_promiscuous_mode(vf
, vf
->lan_vsi_id
, false, false);
1507 /* free VF resources to begin resetting the VSI state */
1508 i40e_free_vf_res(vf
);
1510 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1511 * By doing this we allow HW to access VF memory at any point. If we
1512 * did it any sooner, HW could access memory while it was being freed
1513 * in i40e_free_vf_res(), causing an IOMMU fault.
1515 * On the other hand, this needs to be done ASAP, because the VF driver
1516 * is waiting for this to happen and may report a timeout. It's
1517 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1520 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
1521 reg
&= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
1522 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
1524 /* reallocate VF resources to finish resetting the VSI state */
1525 if (!i40e_alloc_vf_res(vf
)) {
1526 int abs_vf_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
1527 i40e_enable_vf_mappings(vf
);
1528 set_bit(I40E_VF_STATE_ACTIVE
, &vf
->vf_states
);
1529 clear_bit(I40E_VF_STATE_DISABLED
, &vf
->vf_states
);
1530 /* Do not notify the client during VF init */
1531 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE
,
1533 i40e_notify_client_of_vf_reset(pf
, abs_vf_id
);
1537 /* Tell the VF driver the reset is done. This needs to be done only
1538 * after VF has been fully initialized, because the VF driver may
1539 * request resources immediately after setting this flag.
1541 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), VIRTCHNL_VFR_VFACTIVE
);
1546 * @vf: pointer to the VF structure
1547 * @flr: VFLR was issued or not
1549 * Returns true if the VF is in reset, resets successfully, or resets
1550 * are disabled and false otherwise.
1552 bool i40e_reset_vf(struct i40e_vf
*vf
, bool flr
)
1554 struct i40e_pf
*pf
= vf
->pf
;
1555 struct i40e_hw
*hw
= &pf
->hw
;
1560 if (test_bit(__I40E_VF_RESETS_DISABLED
, pf
->state
))
1563 /* Bail out if VFs are disabled. */
1564 if (test_bit(__I40E_VF_DISABLE
, pf
->state
))
1567 /* If VF is being reset already we don't need to continue. */
1568 if (test_and_set_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
))
1571 i40e_trigger_vf_reset(vf
, flr
);
1573 /* poll VPGEN_VFRSTAT reg to make sure
1574 * that reset is complete
1576 for (i
= 0; i
< 10; i
++) {
1577 /* VF reset requires driver to first reset the VF and then
1578 * poll the status register to make sure that the reset
1579 * completed successfully. Due to internal HW FIFO flushes,
1580 * we must wait 10ms before the register will be valid.
1582 usleep_range(10000, 20000);
1583 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
1584 if (reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
) {
1591 usleep_range(10000, 20000);
1594 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
1596 usleep_range(10000, 20000);
1598 /* On initial reset, we don't have any queues to disable */
1599 if (vf
->lan_vsi_idx
!= 0)
1600 i40e_vsi_stop_rings(pf
->vsi
[vf
->lan_vsi_idx
]);
1602 i40e_cleanup_reset_vf(vf
);
1605 usleep_range(20000, 40000);
1606 clear_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
);
1612 * i40e_reset_all_vfs
1613 * @pf: pointer to the PF structure
1614 * @flr: VFLR was issued or not
1616 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1617 * VF, then do all the waiting in one chunk, and finally finish restoring each
1618 * VF after the wait. This is useful during PF routines which need to reset
1619 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1621 * Returns true if any VFs were reset, and false otherwise.
1623 bool i40e_reset_all_vfs(struct i40e_pf
*pf
, bool flr
)
1625 struct i40e_hw
*hw
= &pf
->hw
;
1630 /* If we don't have any VFs, then there is nothing to reset */
1631 if (!pf
->num_alloc_vfs
)
1634 /* If VFs have been disabled, there is no need to reset */
1635 if (test_and_set_bit(__I40E_VF_DISABLE
, pf
->state
))
1638 /* Begin reset on all VFs at once */
1639 for (vf
= &pf
->vf
[0]; vf
< &pf
->vf
[pf
->num_alloc_vfs
]; ++vf
) {
1640 /* If VF is being reset no need to trigger reset again */
1641 if (!test_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
))
1642 i40e_trigger_vf_reset(vf
, flr
);
1645 /* HW requires some time to make sure it can flush the FIFO for a VF
1646 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1647 * sequence to make sure that it has completed. We'll keep track of
1648 * the VFs using a simple iterator that increments once that VF has
1649 * finished resetting.
1651 for (i
= 0, vf
= &pf
->vf
[0]; i
< 10 && vf
< &pf
->vf
[pf
->num_alloc_vfs
]; ++i
) {
1652 usleep_range(10000, 20000);
1654 /* Check each VF in sequence, beginning with the VF to fail
1655 * the previous check.
1657 while (vf
< &pf
->vf
[pf
->num_alloc_vfs
]) {
1658 if (!test_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
)) {
1659 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
1660 if (!(reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
))
1664 /* If the current VF has finished resetting, move on
1665 * to the next VF in sequence.
1672 usleep_range(10000, 20000);
1674 /* Display a warning if at least one VF didn't manage to reset in
1675 * time, but continue on with the operation.
1677 if (vf
< &pf
->vf
[pf
->num_alloc_vfs
])
1678 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
1680 usleep_range(10000, 20000);
1682 /* Begin disabling all the rings associated with VFs, but do not wait
1685 for (vf
= &pf
->vf
[0]; vf
< &pf
->vf
[pf
->num_alloc_vfs
]; ++vf
) {
1686 /* On initial reset, we don't have any queues to disable */
1687 if (vf
->lan_vsi_idx
== 0)
1690 /* If VF is reset in another thread just continue */
1691 if (test_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
))
1694 i40e_vsi_stop_rings_no_wait(pf
->vsi
[vf
->lan_vsi_idx
]);
1697 /* Now that we've notified HW to disable all of the VF rings, wait
1698 * until they finish.
1700 for (vf
= &pf
->vf
[0]; vf
< &pf
->vf
[pf
->num_alloc_vfs
]; ++vf
) {
1701 /* On initial reset, we don't have any queues to disable */
1702 if (vf
->lan_vsi_idx
== 0)
1705 /* If VF is reset in another thread just continue */
1706 if (test_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
))
1709 i40e_vsi_wait_queues_disabled(pf
->vsi
[vf
->lan_vsi_idx
]);
1712 /* Hw may need up to 50ms to finish disabling the RX queues. We
1713 * minimize the wait by delaying only once for all VFs.
1717 /* Finish the reset on each VF */
1718 for (vf
= &pf
->vf
[0]; vf
< &pf
->vf
[pf
->num_alloc_vfs
]; ++vf
) {
1719 /* If VF is reset in another thread just continue */
1720 if (test_bit(I40E_VF_STATE_RESETTING
, &vf
->vf_states
))
1723 i40e_cleanup_reset_vf(vf
);
1727 usleep_range(20000, 40000);
1728 clear_bit(__I40E_VF_DISABLE
, pf
->state
);
1735 * @pf: pointer to the PF structure
1739 void i40e_free_vfs(struct i40e_pf
*pf
)
1741 struct i40e_hw
*hw
= &pf
->hw
;
1742 u32 reg_idx
, bit_idx
;
1748 set_bit(__I40E_VFS_RELEASING
, pf
->state
);
1749 while (test_and_set_bit(__I40E_VF_DISABLE
, pf
->state
))
1750 usleep_range(1000, 2000);
1752 i40e_notify_client_of_vf_enable(pf
, 0);
1754 /* Disable IOV before freeing resources. This lets any VF drivers
1755 * running in the host get themselves cleaned up before we yank
1756 * the carpet out from underneath their feet.
1758 if (!pci_vfs_assigned(pf
->pdev
))
1759 pci_disable_sriov(pf
->pdev
);
1761 dev_warn(&pf
->pdev
->dev
, "VFs are assigned - not disabling SR-IOV\n");
1763 /* Amortize wait time by stopping all VFs at the same time */
1764 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1765 if (test_bit(I40E_VF_STATE_INIT
, &pf
->vf
[i
].vf_states
))
1768 i40e_vsi_stop_rings_no_wait(pf
->vsi
[pf
->vf
[i
].lan_vsi_idx
]);
1771 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1772 if (test_bit(I40E_VF_STATE_INIT
, &pf
->vf
[i
].vf_states
))
1775 i40e_vsi_wait_queues_disabled(pf
->vsi
[pf
->vf
[i
].lan_vsi_idx
]);
1778 /* free up VF resources */
1779 tmp
= pf
->num_alloc_vfs
;
1780 pf
->num_alloc_vfs
= 0;
1781 for (i
= 0; i
< tmp
; i
++) {
1782 if (test_bit(I40E_VF_STATE_INIT
, &pf
->vf
[i
].vf_states
))
1783 i40e_free_vf_res(&pf
->vf
[i
]);
1784 /* disable qp mappings */
1785 i40e_disable_vf_mappings(&pf
->vf
[i
]);
1791 /* This check is for when the driver is unloaded while VFs are
1792 * assigned. Setting the number of VFs to 0 through sysfs is caught
1793 * before this function ever gets called.
1795 if (!pci_vfs_assigned(pf
->pdev
)) {
1796 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1797 * work correctly when SR-IOV gets re-enabled.
1799 for (vf_id
= 0; vf_id
< tmp
; vf_id
++) {
1800 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
1801 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
1802 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), BIT(bit_idx
));
1805 clear_bit(__I40E_VF_DISABLE
, pf
->state
);
1806 clear_bit(__I40E_VFS_RELEASING
, pf
->state
);
1809 #ifdef CONFIG_PCI_IOV
1812 * @pf: pointer to the PF structure
1813 * @num_alloc_vfs: number of VFs to allocate
1815 * allocate VF resources
1817 int i40e_alloc_vfs(struct i40e_pf
*pf
, u16 num_alloc_vfs
)
1819 struct i40e_vf
*vfs
;
1822 /* Disable interrupt 0 so we don't try to handle the VFLR. */
1823 i40e_irq_dynamic_disable_icr0(pf
);
1825 /* Check to see if we're just allocating resources for extant VFs */
1826 if (pci_num_vf(pf
->pdev
) != num_alloc_vfs
) {
1827 ret
= pci_enable_sriov(pf
->pdev
, num_alloc_vfs
);
1829 clear_bit(I40E_FLAG_VEB_MODE_ENA
, pf
->flags
);
1830 pf
->num_alloc_vfs
= 0;
1834 /* allocate memory */
1835 vfs
= kcalloc(num_alloc_vfs
, sizeof(struct i40e_vf
), GFP_KERNEL
);
1842 /* apply default profile */
1843 for (i
= 0; i
< num_alloc_vfs
; i
++) {
1845 vfs
[i
].parent_type
= I40E_SWITCH_ELEMENT_TYPE_VEB
;
1848 /* assign default capabilities */
1849 set_bit(I40E_VIRTCHNL_VF_CAP_L2
, &vfs
[i
].vf_caps
);
1850 vfs
[i
].spoofchk
= true;
1852 set_bit(I40E_VF_STATE_PRE_ENABLE
, &vfs
[i
].vf_states
);
1855 pf
->num_alloc_vfs
= num_alloc_vfs
;
1857 /* VF resources get allocated during reset */
1858 i40e_reset_all_vfs(pf
, false);
1860 i40e_notify_client_of_vf_enable(pf
, num_alloc_vfs
);
1866 /* Re-enable interrupt 0. */
1867 i40e_irq_dynamic_enable_icr0(pf
);
1873 * i40e_pci_sriov_enable
1874 * @pdev: pointer to a pci_dev structure
1875 * @num_vfs: number of VFs to allocate
1877 * Enable or change the number of VFs
1879 static int i40e_pci_sriov_enable(struct pci_dev
*pdev
, int num_vfs
)
1881 #ifdef CONFIG_PCI_IOV
1882 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
1883 int pre_existing_vfs
= pci_num_vf(pdev
);
1886 if (test_bit(__I40E_TESTING
, pf
->state
)) {
1887 dev_warn(&pdev
->dev
,
1888 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1893 if (pre_existing_vfs
&& pre_existing_vfs
!= num_vfs
)
1895 else if (pre_existing_vfs
&& pre_existing_vfs
== num_vfs
)
1898 if (num_vfs
> pf
->num_req_vfs
) {
1899 dev_warn(&pdev
->dev
, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1900 num_vfs
, pf
->num_req_vfs
);
1905 dev_info(&pdev
->dev
, "Allocating %d VFs.\n", num_vfs
);
1906 err
= i40e_alloc_vfs(pf
, num_vfs
);
1908 dev_warn(&pdev
->dev
, "Failed to enable SR-IOV: %d\n", err
);
1922 * i40e_pci_sriov_configure
1923 * @pdev: pointer to a pci_dev structure
1924 * @num_vfs: number of VFs to allocate
1926 * Enable or change the number of VFs. Called when the user updates the number
1929 int i40e_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
1931 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
1934 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING
, pf
->state
)) {
1935 dev_warn(&pdev
->dev
, "Unable to configure VFs, other operation is pending.\n");
1940 if (!test_bit(I40E_FLAG_VEB_MODE_ENA
, pf
->flags
)) {
1941 set_bit(I40E_FLAG_VEB_MODE_ENA
, pf
->flags
);
1942 i40e_do_reset_safe(pf
, I40E_PF_RESET_AND_REBUILD_FLAG
);
1944 ret
= i40e_pci_sriov_enable(pdev
, num_vfs
);
1945 goto sriov_configure_out
;
1948 if (!pci_vfs_assigned(pf
->pdev
)) {
1950 clear_bit(I40E_FLAG_VEB_MODE_ENA
, pf
->flags
);
1951 i40e_do_reset_safe(pf
, I40E_PF_RESET_AND_REBUILD_FLAG
);
1953 dev_warn(&pdev
->dev
, "Unable to free VFs because some are assigned to VMs.\n");
1955 goto sriov_configure_out
;
1957 sriov_configure_out
:
1958 clear_bit(__I40E_VIRTCHNL_OP_PENDING
, pf
->state
);
1962 /***********************virtual channel routines******************/
1965 * i40e_vc_send_msg_to_vf
1966 * @vf: pointer to the VF info
1967 * @v_opcode: virtual channel opcode
1968 * @v_retval: virtual channel return value
1969 * @msg: pointer to the msg buffer
1970 * @msglen: msg length
1974 static int i40e_vc_send_msg_to_vf(struct i40e_vf
*vf
, u32 v_opcode
,
1975 u32 v_retval
, u8
*msg
, u16 msglen
)
1982 /* validate the request */
1983 if (!vf
|| vf
->vf_id
>= vf
->pf
->num_alloc_vfs
)
1988 abs_vf_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
1990 aq_ret
= i40e_aq_send_msg_to_vf(hw
, abs_vf_id
, v_opcode
, v_retval
,
1993 dev_info(&pf
->pdev
->dev
,
1994 "Unable to send the message to VF %d aq_err %d\n",
1995 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
2003 * i40e_vc_send_resp_to_vf
2004 * @vf: pointer to the VF info
2005 * @opcode: operation code
2006 * @retval: return value
2008 * send resp msg to VF
2010 static int i40e_vc_send_resp_to_vf(struct i40e_vf
*vf
,
2011 enum virtchnl_ops opcode
,
2014 return i40e_vc_send_msg_to_vf(vf
, opcode
, retval
, NULL
, 0);
2018 * i40e_sync_vf_state
2019 * @vf: pointer to the VF info
2022 * Called from a VF message to synchronize the service with a potential
2025 static bool i40e_sync_vf_state(struct i40e_vf
*vf
, enum i40e_vf_states state
)
2029 /* When handling some messages, it needs VF state to be set.
2030 * It is possible that this flag is cleared during VF reset,
2031 * so there is a need to wait until the end of the reset to
2032 * handle the request message correctly.
2034 for (i
= 0; i
< I40E_VF_STATE_WAIT_COUNT
; i
++) {
2035 if (test_bit(state
, &vf
->vf_states
))
2037 usleep_range(10000, 20000);
2040 return test_bit(state
, &vf
->vf_states
);
2044 * i40e_vc_get_version_msg
2045 * @vf: pointer to the VF info
2046 * @msg: pointer to the msg buffer
2048 * called from the VF to request the API version used by the PF
2050 static int i40e_vc_get_version_msg(struct i40e_vf
*vf
, u8
*msg
)
2052 struct virtchnl_version_info info
= {
2053 VIRTCHNL_VERSION_MAJOR
, VIRTCHNL_VERSION_MINOR
2056 vf
->vf_ver
= *(struct virtchnl_version_info
*)msg
;
2057 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2058 if (VF_IS_V10(&vf
->vf_ver
))
2059 info
.minor
= VIRTCHNL_VERSION_MINOR_NO_VF_CAPS
;
2060 return i40e_vc_send_msg_to_vf(vf
, VIRTCHNL_OP_VERSION
,
2062 sizeof(struct virtchnl_version_info
));
2066 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2067 * @vf: pointer to VF structure
2069 static void i40e_del_qch(struct i40e_vf
*vf
)
2071 struct i40e_pf
*pf
= vf
->pf
;
2074 /* first element in the array belongs to primary VF VSI and we shouldn't
2075 * delete it. We should however delete the rest of the VSIs created
2077 for (i
= 1; i
< vf
->num_tc
; i
++) {
2078 if (vf
->ch
[i
].vsi_idx
) {
2079 i40e_vsi_release(pf
->vsi
[vf
->ch
[i
].vsi_idx
]);
2080 vf
->ch
[i
].vsi_idx
= 0;
2081 vf
->ch
[i
].vsi_id
= 0;
2087 * i40e_vc_get_max_frame_size
2088 * @vf: pointer to the VF
2090 * Max frame size is determined based on the current port's max frame size and
2091 * whether a port VLAN is configured on this VF. The VF is not aware whether
2092 * it's in a port VLAN so the PF needs to account for this in max frame size
2093 * checks and sending the max frame size to the VF.
2095 static u16
i40e_vc_get_max_frame_size(struct i40e_vf
*vf
)
2097 u16 max_frame_size
= vf
->pf
->hw
.phy
.link_info
.max_frame_size
;
2099 if (vf
->port_vlan_id
)
2100 max_frame_size
-= VLAN_HLEN
;
2102 return max_frame_size
;
2106 * i40e_vc_get_vf_resources_msg
2107 * @vf: pointer to the VF info
2108 * @msg: pointer to the msg buffer
2110 * called from the VF to request its resources
2112 static int i40e_vc_get_vf_resources_msg(struct i40e_vf
*vf
, u8
*msg
)
2114 struct virtchnl_vf_resource
*vfres
= NULL
;
2115 struct i40e_pf
*pf
= vf
->pf
;
2116 struct i40e_vsi
*vsi
;
2122 if (!i40e_sync_vf_state(vf
, I40E_VF_STATE_INIT
)) {
2127 len
= virtchnl_struct_size(vfres
, vsi_res
, num_vsis
);
2128 vfres
= kzalloc(len
, GFP_KERNEL
);
2134 if (VF_IS_V11(&vf
->vf_ver
))
2135 vf
->driver_caps
= *(u32
*)msg
;
2137 vf
->driver_caps
= VIRTCHNL_VF_OFFLOAD_L2
|
2138 VIRTCHNL_VF_OFFLOAD_RSS_REG
|
2139 VIRTCHNL_VF_OFFLOAD_VLAN
;
2141 vfres
->vf_cap_flags
= VIRTCHNL_VF_OFFLOAD_L2
;
2142 vfres
->vf_cap_flags
|= VIRTCHNL_VF_CAP_ADV_LINK_SPEED
;
2143 vsi
= pf
->vsi
[vf
->lan_vsi_idx
];
2144 if (!vsi
->info
.pvid
)
2145 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_VLAN
;
2147 if (i40e_vf_client_capable(pf
, vf
->vf_id
) &&
2148 (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RDMA
)) {
2149 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_RDMA
;
2150 set_bit(I40E_VF_STATE_RDMAENA
, &vf
->vf_states
);
2152 clear_bit(I40E_VF_STATE_RDMAENA
, &vf
->vf_states
);
2155 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RSS_PF
) {
2156 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_RSS_PF
;
2158 if (test_bit(I40E_HW_CAP_RSS_AQ
, pf
->hw
.caps
) &&
2159 (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RSS_AQ
))
2160 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_RSS_AQ
;
2162 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_RSS_REG
;
2165 if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE
, pf
->hw
.caps
)) {
2166 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
)
2167 vfres
->vf_cap_flags
|=
2168 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
;
2171 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_ENCAP
)
2172 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_ENCAP
;
2174 if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM
, pf
->hw
.caps
) &&
2175 (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM
))
2176 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM
;
2178 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_RX_POLLING
) {
2179 if (test_bit(I40E_FLAG_MFP_ENA
, pf
->flags
)) {
2180 dev_err(&pf
->pdev
->dev
,
2181 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2186 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_RX_POLLING
;
2189 if (test_bit(I40E_HW_CAP_WB_ON_ITR
, pf
->hw
.caps
)) {
2190 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
)
2191 vfres
->vf_cap_flags
|=
2192 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
;
2195 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_REQ_QUEUES
)
2196 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES
;
2198 if (vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_ADQ
)
2199 vfres
->vf_cap_flags
|= VIRTCHNL_VF_OFFLOAD_ADQ
;
2201 vfres
->num_vsis
= num_vsis
;
2202 vfres
->num_queue_pairs
= vf
->num_queue_pairs
;
2203 vfres
->max_vectors
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
2204 vfres
->rss_key_size
= I40E_HKEY_ARRAY_SIZE
;
2205 vfres
->rss_lut_size
= I40E_VF_HLUT_ARRAY_SIZE
;
2206 vfres
->max_mtu
= i40e_vc_get_max_frame_size(vf
);
2208 if (vf
->lan_vsi_idx
) {
2209 vfres
->vsi_res
[0].vsi_id
= vf
->lan_vsi_id
;
2210 vfres
->vsi_res
[0].vsi_type
= VIRTCHNL_VSI_SRIOV
;
2211 vfres
->vsi_res
[0].num_queue_pairs
= vsi
->alloc_queue_pairs
;
2212 /* VFs only use TC 0 */
2213 vfres
->vsi_res
[0].qset_handle
2214 = le16_to_cpu(vsi
->info
.qs_handle
[0]);
2215 if (!(vf
->driver_caps
& VIRTCHNL_VF_OFFLOAD_USO
) && !vf
->pf_set_mac
) {
2216 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
2217 i40e_del_mac_filter(vsi
, vf
->default_lan_addr
.addr
);
2218 eth_zero_addr(vf
->default_lan_addr
.addr
);
2219 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
2221 ether_addr_copy(vfres
->vsi_res
[0].default_mac_addr
,
2222 vf
->default_lan_addr
.addr
);
2224 set_bit(I40E_VF_STATE_ACTIVE
, &vf
->vf_states
);
2227 /* send the response back to the VF */
2228 ret
= i40e_vc_send_msg_to_vf(vf
, VIRTCHNL_OP_GET_VF_RESOURCES
,
2229 aq_ret
, (u8
*)vfres
, len
);
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	bool allmulti = false;
	bool alluni = false;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);
	}

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);
	}
err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id = 0;
	struct i40e_pf *pf = vf->pf;
	int i, j = 0, idx = 0;
	struct i40e_vsi *vsi;
	u16 num_qps_all = 0;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (vf->adq_enabled) {
		for (i = 0; i < vf->num_tc; i++)
			num_qps_all += vf->ch[i].num_qps;
		if (num_qps_all != qci->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi_id = qci->vsi_id;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      qpi->txq.queue_id)) {
				aq_ret = -EINVAL;
				goto error_param;
			}

			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = -EINVAL;
				goto error_param;
			}
		}

		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 */
		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++) {
			vsi = pf->vsi[vf->ch[i].vsi_idx];
			vsi->num_queue_pairs = vf->ch[i].num_qps;

			if (i40e_update_adq_vsi_queues(vsi, i)) {
				aq_ret = -EIO;
				goto error_param;
			}
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}
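
/* Worked example of the ADq mapping above (illustrative, assuming the
 * driver's values I40E_MAX_VF_VSI = 4 and I40E_DEFAULT_QUEUES_PER_VF = 4):
 * a VF-relative vsi_queue_id of 9 selects channel 9 / 4 = 2, so
 * vsi_id = vf->ch[2].vsi_id, and maps to queue_id = 9 % 4 = 1 inside that
 * channel's VSI.
 */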
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int aq_ret = 0;
	u16 vsi_id;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (irqmap_info->num_vectors >
	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
			aq_ret = -EINVAL;
			goto error_param;
		}
		vsi_id = map->vsi_id;

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}
/**
 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Returns true if validation was successful, else false.
 */
static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
		return false;

	return true;
}
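
/* Example of the checks above (illustrative, assuming
 * I40E_MAX_VF_QUEUES = 16): rx_queues = 0x000F with tx_queues = 0x000F
 * (queues 0-3) is accepted; rx_queues = 0 with tx_queues = 0 is rejected
 * because nothing is selected; and any bitmap with a bit at or above
 * BIT(16) is rejected as out of range for the VF.
 */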
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int i;

	if (vf->is_disabled_from_host) {
		aq_ret = -EPERM;
		dev_info(&pf->pdev->dev,
			 "Admin has disabled VF %d, will not enable queues\n",
			 vf->vf_id);
		goto error_param;
	}

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = -EIO;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_check_enough_queue - find big enough queue number
 * @vf: pointer to the VF info
 * @needed: the number of items needed
 *
 * Returns the base item index of the queue, or negative for error
 **/
static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
{
	unsigned int i, cur_queues, more, pool_size;
	struct i40e_lump_tracking *pile;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];
	cur_queues = vsi->alloc_queue_pairs;

	/* if current allocated queues are enough for need */
	if (cur_queues >= needed)
		return vsi->base_queue;

	pile = pf->qp_pile;
	if (cur_queues > 0) {
		/* if the allocated queues are not zero
		 * just check if there are enough queues for more
		 * behind the allocated queues.
		 */
		more = needed - cur_queues;
		for (i = vsi->base_queue + cur_queues;
		     i < pile->num_entries; i++) {
			if (pile->list[i] & I40E_PILE_VALID_BIT)
				break;

			if (more-- == 1)
				/* there is enough */
				return vsi->base_queue;
		}
	}

	pool_size = 0;
	for (i = 0; i < pile->num_entries; i++) {
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			pool_size = 0;
			continue;
		}
		if (needed <= ++pool_size)
			/* there is enough */
			return i - needed + 1;
	}

	return -ENOMEM;
}
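
/* Illustrative walk of the pf->qp_pile scan above: entries flagged with
 * I40E_PILE_VALID_BIT are already owned, so over a list such as
 * [used used free free free used ...] pool_size counts 1, 2, 3 across the
 * free run and resets to 0 at the next used entry; once
 * needed <= pool_size, the base of that run, i - needed + 1, is returned.
 */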
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_pairs = vfres->num_queue_pairs;
	u8 cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
		return -EINVAL;

	if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but there is not enough for it.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs);
		vfres->num_queue_pairs = cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_reset_vf(vf, true);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	int aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = -EINVAL;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}
/**
 * i40e_can_vf_change_mac
 * @vf: pointer to the VF info
 *
 * Return true if the VF is allowed to change its MAC filters, false otherwise
 */
static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
{
	/* If the VF MAC address has been set administratively (via the
	 * ndo_set_vf_mac command), then deny permission to the VF to
	 * add/delete unicast MAC addresses, unless the VF is trusted
	 */
	if (vf->pf_set_mac && !vf->trusted)
		return false;

	return true;
}
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
	(num_ports))
/* If the VF is not trusted restrict the number of MAC/VLAN it can program
 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
 */
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16

#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
({	typeof(vf_num) vf_num_ = (vf_num);				\
	typeof(num_ports) num_ports_ = (num_ports);			\
	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
	  I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
	 I40E_VC_MAX_MAC_ADDR_PER_VF; })
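
/* Worked example for the trusted-VF budget above (illustrative numbers,
 * not from any datasheet): with num_ports = 2 and vf_num = 32,
 * I40E_MAX_MACVLAN_PER_PF(2) = 3072 / 2 = 1536, so each trusted VF may use
 * up to ((1536 - 32 * 18) / 32) + 18 = 30 + 18 = 48 MAC/VLAN filters,
 * while an untrusted VF stays capped at I40E_VC_MAX_MAC_ADDR_PER_VF = 18.
 */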
/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 *
 * Note that to guarantee consistency, it is expected this function be called
 * while holding the mac_filter_hash_lock, as otherwise the current number of
 * addresses might not be accurate.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	int mac2add_cnt = 0;
	int i;

	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return -EINVAL;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * Unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		if (!i40e_can_vf_change_mac(vf) &&
		    !is_multicast_ether_addr(addr) &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
			return -EPERM;
		}

		/*count filters that really will be added*/
		f = i40e_find_mac(vsi, addr);
		if (!f)
			++mac2add_cnt;
	}

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		if ((i40e_count_filters(vsi) + mac2add_cnt) >
		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
			return -EPERM;
		}
	/* If this VF is trusted, it can use more resources than untrusted.
	 * However to ensure that every trusted VF has appropriate number of
	 * resources, divide whole pool of resources per port and then across
	 * all VFs.
	 */
	} else {
		if ((i40e_count_filters(vsi) + mac2add_cnt) >
		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
						       hw->num_ports)) {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
			return -EPERM;
		}
	}
	return 0;
}
/**
 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 **/
static u8
i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
}

/**
 * i40e_is_vc_addr_legacy
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is from an older VF
 **/
static bool
i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_LEGACY;
}

/**
 * i40e_is_vc_addr_primary
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is the VF's primary MAC
 * This function should only be called when the MAC address in
 * virtchnl_ether_addr is a valid unicast MAC
 **/
static bool
i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_PRIMARY;
}
/**
 * i40e_update_vf_mac_addr
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 *
 * update the VF's cached hardware MAC if allowed
 **/
static void
i40e_update_vf_mac_addr(struct i40e_vf *vf,
			struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* If request to add MAC filter is a primary request update its default
	 * MAC address with the requested one. If it is a legacy request then
	 * check if current default is empty if so update the default MAC
	 */
	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
		if (is_zero_ether_addr(vf->default_lan_addr.addr))
			ether_addr_copy(vf->default_lan_addr.addr,
					mac_addr);
	}
}
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	ret = i40e_check_vf_permission(vf, al);
	if (ret) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		goto error_param;
	}

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);
			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = -EINVAL;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			}
		}
		i40e_update_vf_mac_addr(vf, &al->list[i]);
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				      ret, NULL, 0);
}
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	bool was_unimac_deleted = false;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = -EINVAL;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++) {
		const u8 *addr = al->list[i].addr;

		/* Allow to delete VF primary MAC only if it was not set
		 * administratively by PF or if VF is trusted.
		 */
		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			if (i40e_can_vf_change_mac(vf))
				was_unimac_deleted = true;
			else
				continue;
		}

		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = -EINVAL;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		}
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (was_unimac_deleted)
		eth_zero_addr(vf->default_lan_addr.addr);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

	if (vf->trusted && was_unimac_deleted) {
		struct i40e_mac_filter *f;
		struct hlist_node *h;
		u8 *macaddr = NULL;
		int bkt;

		/* set last unicast mac address as default */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (is_valid_ether_addr(f->macaddr))
				macaddr = f->macaddr;
		}
		if (macaddr)
			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		if (vfl->num_elements > 1 || vfl->vlan_id[0])
			aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
 * i40e_vc_rdma_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *main_vsi;
	int aq_ret = 0;
	int abs_vf_id;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	main_vsi = i40e_pf_get_main_vsi(pf);
	abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
				       aq_ret);
}
/**
 * i40e_vc_rdma_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
	struct virtchnl_rdma_qvlist_info *qvlist_info =
				(struct virtchnl_rdma_qvlist_info *)msg;
	int aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (config) {
		if (i40e_config_rdma_qvlist(vf, qvlist_info))
			aq_ret = -EINVAL;
	} else {
		i40e_release_rdma_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
			       aq_ret);
}
/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}
/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl =
		(struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	u16 i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < vrl->lut_entries; i++)
		if (vrl->lut[i] >= vf->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto err;
		}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}
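
/* Example of the LUT bound check above (illustrative, assuming
 * I40E_VF_HLUT_ARRAY_SIZE = 64): for a VF that owns 4 queue pairs every
 * lut[i] must be in 0-3; a single entry of 4 or more rejects the whole
 * request with -EINVAL instead of hashing traffic to a queue the VF does
 * not own.
 */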
/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int len = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}
/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}
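
/* The HENA value is a 64-bit mask of enabled hash types, but each
 * I40E_VFQF_HENA1 register is only 32 bits wide, so the write above is
 * split: index 0 takes the low word ((u32)hena) and index 1 the high word
 * (hena >> 32). For example (illustrative value only),
 * hena = 0x0000000500000003 programs 0x00000003 into HENA1(0, vf_id) and
 * 0x00000005 into HENA1(1, vf_id).
 */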
/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}
/**
 * i40e_validate_cloud_filter
 * @vf: pointer to VF structure
 * @tc_filter: pointer to filter requested
 *
 * This function validates cloud filter programmed as TC filter for ADq
 **/
static int i40e_validate_cloud_filter(struct i40e_vf *vf,
				      struct virtchnl_filter *tc_filter)
{
	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADQ doesn't support this action (%d)\n",
			 vf->vf_id, tc_filter->action);
		goto err;
	}

	/* action_meta is TC number here to which the filter is applied */
	if (!tc_filter->action_meta ||
	    tc_filter->action_meta > vf->num_tc) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
			 vf->vf_id, tc_filter->action_meta);
		goto err;
	}

	/* Check filter if it's programmed for advanced mode or basic mode.
	 * There are two ADq modes (for VF only),
	 * 1. Basic mode: intended to allow as many filter options as possible
	 *		  to be added to a VF in Non-trusted mode. Main goal is
	 *		  to add filters to its own MAC and VLAN id.
	 * 2. Advanced mode: is for allowing filters to be applied other than
	 *		  its own MAC or VLAN. This mode requires the VF to be
	 *		  Trusted.
	 */
	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
		vsi = pf->vsi[vf->lan_vsi_idx];
		f = i40e_find_mac(vsi, data.dst_mac);

		if (!f) {
			dev_info(&pf->pdev->dev,
				 "Destination MAC %pM doesn't belong to VF %d\n",
				 data.dst_mac, vf->vf_id);
			goto err;
		}

		if (mask.vlan_id) {
			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
					   hlist) {
				if (f->vlan == ntohs(data.vlan_id)) {
					found = true;
					break;
				}
			}
			if (!found) {
				dev_info(&pf->pdev->dev,
					 "VF %d doesn't have any VLAN id %u\n",
					 vf->vf_id, ntohs(data.vlan_id));
				goto err;
			}
		}
	} else {
		/* Check if VF is trusted */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
			dev_err(&pf->pdev->dev,
				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
				vf->vf_id);
			return -EIO;
		}
	}

	if (mask.dst_mac[0] & data.dst_mac[0]) {
		if (is_broadcast_ether_addr(data.dst_mac) ||
		    is_zero_ether_addr(data.dst_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
				 vf->vf_id, data.dst_mac);
			goto err;
		}
	}

	if (mask.src_mac[0] & data.src_mac[0]) {
		if (is_broadcast_ether_addr(data.src_mac) ||
		    is_zero_ether_addr(data.src_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
				 vf->vf_id, data.src_mac);
			goto err;
		}
	}

	if (mask.dst_port & data.dst_port) {
		if (!data.dst_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (mask.src_port & data.src_port) {
		if (!data.src_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
			 vf->vf_id);
		goto err;
	}

	if (mask.vlan_id & data.vlan_id) {
		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
				 vf->vf_id);
			goto err;
		}
	}

	return 0;
err:
	return -EIO;
}
/**
 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
 * @vf: pointer to the VF info
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int i;

	for (i = 0; i < vf->num_tc; i++) {
		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
		if (vsi && vsi->seid == seid)
			return vsi;
	}
	return NULL;
}
/**
 * i40e_del_all_cloud_filters
 * @vf: pointer to the VF info
 *
 * This function deletes all cloud filters
 **/
static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
{
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	int ret;

	hlist_for_each_entry_safe(cfilter, node,
				  &vf->cloud_filter_list, cloud_node) {
		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);

		if (!vsi) {
			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
				vf->vf_id, cfilter->seid);
			continue;
		}

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								false);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
		if (ret)
			dev_err(&pf->pdev->dev,
				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
				vf->vf_id, ERR_PTR(ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));

		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
		vf->num_cloud_filters--;
	}
}
/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function deletes a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter cfilter, *cf = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	int aq_ret = 0;
	int i, ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	memset(&cfilter, 0, sizeof(cfilter));
	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter.dst_port = mask.dst_port & tcf.dst_port;
	cfilter.src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter.n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter.n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter.ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter.ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the vsi to which the tc belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter.seid = vsi->seid;
	cfilter.flags = vcf->field_flags;

	/* Deleting TC filter */
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
	else
		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err;
	}

	hlist_for_each_entry_safe(cf, node,
				  &vf->cloud_filter_list, cloud_node) {
		if (cf->seid != cfilter.seid)
			continue;
		if (mask.dst_port)
			if (cfilter.dst_port != cf->dst_port)
				continue;
		if (mask.dst_mac[0])
			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
				continue;
		/* for ipv4 data to be valid, only first byte of mask is set */
		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
				   ARRAY_SIZE(tcf.dst_ip)))
				continue;
		/* for ipv6, mask is set for all sixteen bytes (4 words) */
		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
				   sizeof(cfilter.ip.v6.src_ip6)))
				continue;
		if (mask.vlan_id)
			if (cfilter.vlan_id != cf->vlan_id)
				continue;

		hlist_del(&cf->cloud_node);
		kfree(cf);
		vf->num_cloud_filters--;
	}

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
				       aq_ret);
}
/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function adds a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input/s, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
		goto err_out;
	}

	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
	if (!cfilter) {
		aq_ret = -ENOMEM;
		goto err_out;
	}

	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter->dst_port = mask.dst_port & tcf.dst_port;
	cfilter->src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter->n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter->n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter->ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter->ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the VSI to which the TC belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter->seid = vsi->seid;
	cfilter->flags = vcf->field_flags;

	/* Adding cloud filter programmed as TC filter */
	if (tcf.dst_port)
		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
	else
		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(aq_ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err_free;
	}

	INIT_HLIST_NODE(&cfilter->cloud_node);
	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
	/* release the pointer passing it to the collection */
	cfilter = NULL;
	vf->num_cloud_filters++;
err_free:
	kfree(cfilter);
err_out:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
				       aq_ret);
}
/**
 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_tc_info *tci =
		(struct virtchnl_tc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int i, adq_request_qps = 0;
	int aq_ret = 0;
	u64 speed = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	/* ADq cannot be applied if spoof check is ON */
	if (vf->spoofchk) {
		dev_err(&pf->pdev->dev,
			"Spoof check is ON, turn it OFF to enable ADq\n");
		aq_ret = -EINVAL;
		goto err;
	}

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
		dev_err(&pf->pdev->dev,
			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	}

	/* max number of traffic classes for VF currently capped at 4 */
	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
		dev_err(&pf->pdev->dev,
			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
		aq_ret = -EINVAL;
		goto err;
	}

	/* validate queues for each TC */
	for (i = 0; i < tci->num_tc; i++)
		if (!tci->list[i].count ||
		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
				vf->vf_id, i, tci->list[i].count,
				I40E_DEFAULT_QUEUES_PER_VF);
			aq_ret = -EINVAL;
			goto err;
		}

	/* need Max VF queues but already have default number of queues */
	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;

	if (pf->queues_left < adq_request_qps) {
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
		aq_ret = -EINVAL;
		goto err;
	} else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure ADq enabled VF always gets back queues when it
		 * goes through a reset.
		 */
		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
	}

	/* get link speed in MB to validate rate limit */
	speed = i40e_vc_link_speed2mbps(ls->link_speed);
	if (speed == SPEED_UNKNOWN) {
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
		aq_ret = -EINVAL;
		goto err;
	}

	/* parse data from the queue channel info */
	vf->num_tc = tci->num_tc;
	for (i = 0; i < vf->num_tc; i++) {
		if (tci->list[i].max_tx_rate) {
			if (tci->list[i].max_tx_rate > speed) {
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d.",
					tci->list[i].max_tx_rate,
					vf->vf_id);
				aq_ret = -EINVAL;
				goto err;
			} else {
				vf->ch[i].max_tx_rate =
					tci->list[i].max_tx_rate;
			}
		}
		vf->ch[i].num_qps = tci->list[i].count;
	}

	/* set this flag only after making sure all inputs are sane */
	vf->adq_enabled = true;

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
				       aq_ret);
}
/**
 * i40e_vc_del_qch_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	if (vf->adq_enabled) {
		i40e_del_all_cloud_filters(vf);
		i40e_del_qch(vf);
		vf->adq_enabled = false;
		vf->num_tc = 0;
		dev_info(&pf->pdev->dev,
			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
			 vf->vf_id);
	} else {
		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
			 vf->vf_id);
		aq_ret = -EINVAL;
	}

	/* reset the VF in order to allocate resources */
	i40e_vc_reset_vf(vf, true);

	return 0;

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
				       aq_ret);
}
/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return -EINVAL;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf(vf, false);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RDMA:
		ret = i40e_vc_rdma_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
		break;
	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		ret = i40e_vc_add_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		ret = i40e_vc_del_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		ret = i40e_vc_add_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		ret = i40e_vc_del_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      -EOPNOTSUPP);
		break;
	}

	return ret;
}
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the vflr irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}
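
/* Example of the VFLR bit lookup above (illustrative numbers): with
 * vf_base_id = 64 and vf_id = 5 the absolute VF id is 69, giving
 * reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5, so the handler tests
 * bit 5 of I40E_GLGEN_VFLRSTAT(2).
 */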
/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
{
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto err_out;
	}

	vf = &pf->vf[vf_id];
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
	if (!vsi)
		ret = -EINVAL;
err_out:
	return ret;
}
/**
 * i40e_check_vf_init_timeout
 * @vf: the virtual function
 *
 * Check that the VF's initialization was successfully done and if not
 * wait up to 300ms for its finish.
 *
 * Returns true when VF is initialized, false on timeout
 **/
static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
{
	int i;

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds, but wait for
	 * up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			return true;
		msleep(20);
	}

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&vf->pf->pdev->dev,
			"VF %d still in reset. Try again.\n", vf->vf_id);
		return false;
	}

	return true;
}
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF interface down so it has to bring up with new MAC
	 * address
	 */
	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
4487 * i40e_ndo_set_vf_port_vlan
4488 * @netdev: network interface device structure
4489 * @vf_id: VF identifier
4490 * @vlan_id: mac address
4491 * @qos: priority setting
4492 * @vlan_proto: vlan protocol
4494 * program VF vlan id and/or qos
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	bool allmulti = false, alluni = false;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_pvid;

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error_pvid;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	i40e_vlan_stripping_enable(vsi);

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* disable promisc modes in case they were enabled */
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
					      allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
		goto error_pvid;
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
		alluni = true;

	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
		allmulti = true;

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

	i40e_vc_reset_vf(vf, true);
	/* During reset the VF got a new VSI, so refresh a pointer. */
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
		goto error_pvid;
	}

error_pvid:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
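/* Illustrative note (assumption, not part of the driver): the pvid word
 * programmed here packs both fields as
 * vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT), so a request such
 * as "ip link set <pf-netdev> vf 0 vlan 100 qos 3" yields a single 16-bit
 * VLAN-TCI-style value with the 12-bit VLAN ID in the low bits and the
 * priority above it.
 */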
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error;

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		ret = -EINVAL;
		goto error;
	}

	vf = &pf->vf[vf_id];
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto error;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;

error:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
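/* Illustrative note (assumption): the rates arrive from rtnetlink in Mbps,
 * e.g. "ip link set <pf-netdev> vf 0 max_tx_rate 1000" caps the VF at 1 Gbps
 * via i40e_set_bw_limit(); any non-zero min_tx_rate is rejected above because
 * the device offers no minimum-rate guarantee for VFs.
 */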
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	ret = i40e_validate_vf(pf, vf_id);
	if (ret)
		goto error_param;

	vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		ret = -ENOENT;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
	if (vf->link_forced == false)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up == true)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
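/* Illustrative note: everything filled into struct ifla_vf_info here is what
 * "ip link show <pf-netdev>" prints per VF: the MAC, the vlan/qos pair
 * decoded from the pvid word, the rate cap, and the link-state, spoofchk and
 * trust flags.
 */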
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	unsigned long q_map;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;
	int tmp;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		vf->is_disabled_from_host = false;
		/* reset needed to reinit VF resources */
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		vf->is_disabled_from_host = false;
		/* reset needed to reinit VF resources */
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		i40e_set_vf_link_state(vf, &pfe, ls);

		vsi = pf->vsi[vf->lan_vsi_idx];
		q_map = BIT(vsi->num_queue_pairs) - 1;

		vf->is_disabled_from_host = true;

		/* Try to stop both Tx&Rx rings even if one of the calls fails
		 * to ensure we stop the rings even in case of errors.
		 * If any of them returns with an error then the first
		 * error that occurred will be returned.
		 */
		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);

		ret = tmp ? tmp : ret;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
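/* Illustrative note: the three IFLA_VF_LINK_STATE_* values map to
 * "ip link set <pf-netdev> vf 0 state auto|enable|disable". Only the
 * "disable" arm stops the VF queue pairs; "auto" and "enable" reset the VF
 * and then report either the physical or the forced link in the VIRTCHNL
 * event sent above.
 */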
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!i40e_check_vf_init_timeout(vf)) {
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
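/* Illustrative note: "ip link set <pf-netdev> vf 0 spoofchk on" lands here.
 * Enabling the check sets both the MAC and VLAN anti-spoof flags in the VSI
 * security section, so the hardware drops transmit frames whose source MAC
 * or VLAN does not match what the PF programmed for the VF.
 */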
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		ret = -EINVAL;
		goto out;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;

	/* request PF to sync mac/vlan filters for the VF */
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled) {
		if (!vf->trusted) {
			dev_info(&pf->pdev->dev,
				 "VF %u no longer Trusted, deleting all cloud filters\n",
				 vf_id);
			i40e_del_all_cloud_filters(vf);
		}
	}

out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
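/* Illustrative note: trust is toggled with "ip link set <pf-netdev> vf 0
 * trust on". Revoking trust while ADq is enabled also tears down the VF's
 * cloud filters, as the branch above shows, and every change forces a VF
 * reset so the new privilege level takes effect over VIRTCHNL.
 */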
/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 **/
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
		      struct ifla_vf_stats *vf_stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_eth_stats *stats;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;

	/* validate the request */
	if (i40e_validate_vf(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	i40e_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
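/* Illustrative note: these counters are reported through IFLA_VF_STATS, so
 * "ip -s link show <pf-netdev>" can display them per VF. rx_packets and
 * tx_packets are reconstructed as unicast + broadcast + multicast because
 * the VSI keeps per-class counters rather than a single packet total.
 */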