// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

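/* Typical request lifecycle built from the helpers above (illustrative
 * sketch only; CHANNEL_TLV_EXAMPLE stands in for any concrete request type
 * and error handling is elided):
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_EXAMPLE, sizeof(*req));
 *	... fill request fields, optionally add extended TLVs ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	qed_vf_pf_req_end(p_hwfn, rc);
 *
 * The channel mutex is held from qed_vf_pf_prep() until qed_vf_pf_req_end(),
 * so all VF->PF requests are serialized.
 */
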
#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
#define QED_VF_CHANNEL_USLEEP_DELAY		100
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
#define QED_VF_CHANNEL_MSLEEP_DELAY		25

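/* Worst-case wait before qed_send_msg2pf() declares a timeout (sketch of
 * the arithmetic): 90 * 100us + 10 * 25ms = 9ms + 250ms ~= 259ms.
 */
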
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int iter, rc = 0;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address from a coherent DMA zone. Poll until then.
	 */

	iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
	while (!*done && iter--) {
		udelay(QED_VF_CHANNEL_USLEEP_DELAY);
		dma_rmb();
	}

	iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
	while (!*done && iter--) {
		msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
		dma_rmb();
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
			      struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}

static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return _qed_vf_pf_release(p_hwfn, true);
}

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters,
		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

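/* The acquire flow below is a negotiation: on PFVF_STATUS_NO_RESOURCE the
 * VF adopts the PF-recommended amounts via the "humbling" helper above and
 * re-sends (bounded by VF_ACQUIRE_THRESH attempts); on
 * PFVF_STATUS_NOT_SUPPORTED it may re-send once more advertising the legacy
 * pre-FP-HSI capability.
 */
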
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	u8 retry_cnt = VF_ACQUIRE_THRESH;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

		/* Re-try acquire in case of vf-pf hw channel timeout */
		if (retry_cnt && rc == -EBUSY) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

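/* The PF reports BAR sizes as log2 values; e.g. a reported bar_size of 17
 * in the acquire response translates below to 1 << 17 = 128 KiB.
 */
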
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;

	return 0;
}

int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
	struct qed_vf_iov *p_iov;
	u32 reg;
	int rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
	 * value, but there are several incompatibility scenarios where that
	 * would be incorrect and we'd need to override it.
	 */
	if (!p_hwfn->doorbells) {
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 __iomem *)
			    p_hwfn->regview + PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64) p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	rc = qed_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (!rc && p_iov->b_doorbell_bar &&
	    !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    (p_hwfn->cdev->num_hwfns > 1)) {
		rc = _qed_vf_pf_release(p_hwfn, false);
		if (rc)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
		rc = qed_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

	return rc;

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

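/* Example of the legacy producer address computed from these macros for
 * hw_qid 2 (illustrative only):
 *	regview + MSTORM_QZONE_START(cdev) + 2 * MSTORM_QZONE_SIZE
 */
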
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}

int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

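/* Rx queue startup discovers the producer address in one of two ways:
 * modern PFs return its offset within the regview in the response, while
 * with legacy (pre-FP-HSI) PFs the VF computes it itself from the MSTORM
 * queue-zone macros above.
 */
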
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
					     qed_db_addr_vf(cid,
							    DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
			tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

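/* qed_vf_pf_vport_update() below aggregates all requested changes as
 * extended TLVs in a single message; each extended TLV grows the expected
 * reply by one pfvf_def_resp_tlv, which is why resp_size is incremented
 * alongside every qed_add_tlv() call.
 */
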
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

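/* Approximate multicast filtering uses a bin bitmap: each MAC is hashed to
 * a bin by qed_mcast_bin_from_mac(), and bin N maps to bit (N % 32) of word
 * (N / 32) in sp_params.bins[] - e.g. bin 37 sets bit 5 of bins[1].
 */
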
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8) p_ucast->opcode;
	req->type = (u8) p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
			      u8 *p_mac)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	struct pfvf_def_resp_tlv *p_resp;
	int rc;

	if (!p_mac)
		return -EINVAL;

	/* clear mailbox and prep header tlv */
	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			       sizeof(*p_req));
	ether_addr_copy(p_req->mac, p_mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Requesting bulletin update for MAC[%pM]\n", p_mac);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
		       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	if (rx_coal)
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

	if (tx_coal)
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}

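/* Bulletin board validation sketch: the PF posts a versioned, CRC-protected
 * snapshot; the VF copies it out and accepts it only when crc32 over the
 * payload (everything past the leading crc field) matches the posted crc.
 */
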
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	/* Bulletin is updated only if we received a valid one */
	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return true;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn, NULL);
}

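/* The VF gets no interrupt for bulletin updates; qed_iov_vf_task() below
 * therefore re-arms itself every second (HZ jiffies) to poll the board.
 */
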
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}