// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}
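/* Every VF -> PF request in this file follows the same sequence:
 * qed_vf_pf_prep() locks the channel and clears the mailbox, qed_add_tlv()
 * appends any extended TLVs plus the CHANNEL_TLV_LIST_END terminator,
 * qed_send_msg2pf() rings the HW channel and polls for the PF's reply, and
 * qed_vf_pf_req_end() logs the outcome and drops the lock.
 */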
static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
#define QED_VF_CHANNEL_USLEEP_DELAY		100
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
#define QED_VF_CHANNEL_MSLEEP_DELAY		25
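/* Polling budget for qed_send_msg2pf(): 90 iterations of 100 us busy-wait
 * (~9 ms) followed by 10 iterations of 25 ms sleeps (250 ms), i.e. a PF
 * reply gets roughly 259 ms in total before the VF gives up with -EBUSY.
 */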
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int iter, rc = 0;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger
	 * before data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address from a coherent DMA zone. Poll until then.
	 */

	iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
	while (!*done && iter--) {
		udelay(QED_VF_CHANNEL_USLEEP_DELAY);
		dma_rmb();
	}

	iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
	while (!*done && iter--) {
		msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
		dma_rmb();
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
			      struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}
static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return _qed_vf_pf_release(p_hwfn, true);
}
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters,
		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}
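/* qed_vf_pf_acquire() below calls this helper whenever the PF answers
 * PFVF_STATUS_NO_RESOURCE: the request is lowered to the PF-recommended
 * amounts and re-sent, for at most VF_ACQUIRE_THRESH attempts.
 */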
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	u8 retry_cnt = VF_ACQUIRE_THRESH;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* starting filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);

		/* Re-try acquire in case of vf-pf hw channel timeout */
		if (retry_cnt && rc == -EBUSY) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

	return 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}
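/* The PF reports the doorbell bar size in log2 form - e.g. a bar_size of
 * 17 stands for 128 KiB - hence the 1 << bar_size above. A returned 0
 * presumably means an older PF that never filled the field.
 */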
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
	struct qed_vf_iov *p_iov;
	u32 reg;
	int rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
	 * value, but there are several incompatibility scenarios where that
	 * would be incorrect and we'd need to override it.
	 */
	if (!p_hwfn->doorbells) {
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 __iomem *)
			    p_hwfn->regview + PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	rc = qed_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (!rc && p_iov->b_doorbell_bar &&
	    !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    (p_hwfn->cdev->num_hwfns > 1)) {
		rc = _qed_vf_pf_release(p_hwfn, false);
		if (rc)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
						  PXP_VF_BAR0_START_DQ;
		rc = qed_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

	return rc;

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START +	\
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
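/* Layout used by qed_vf_pf_rxq_start() when talking to a legacy PF: the Rx
 * producer is then computed by the VF itself as
 * regview + MSTORM_QZONE_START(dev) + hw_qid * MSTORM_QZONE_SIZE.
 */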
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}
static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}
static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}
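/* tunn_feature_mask advertises which tunnel modes the PF actually
 * configured; __qed_vf_update_tunn_param() clears b_mode_enabled for any
 * mode whose bit is absent, so the VF's cached tunnel state never claims
 * more than the PF granted.
 */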
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
					     qed_db_addr_vf(cid,
							    DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
			tlv);
		return false;
	}
}
static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
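/* A single vport-update exchange may carry any mix of the optional TLVs
 * built above (activate, tx-switching, mcast bins, accept flags, RSS,
 * accept-any-vlan). The PF replies with one status TLV per request TLV,
 * which qed_vf_handle_vp_update_tlvs_resp() walks after the exchange.
 */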
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}
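/* Approximate multicast filtering: qed_mcast_bin_from_mac() hashes each
 * MAC to a bin index, and the bins travel to the PF packed in 32-bit
 * words - bit / 32 selects the word, bit % 32 the bit inside it.
 */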
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
			      u8 *p_mac)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	struct pfvf_def_resp_tlv *p_resp;
	int rc;

	if (!p_mac)
		return -EINVAL;

	/* clear mailbox and prep header tlv */
	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			       sizeof(*p_req));
	ether_addr_copy(p_req->mac, p_mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Requesting bulletin update for MAC[%pM]\n", p_mac);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
		       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	if (rx_coal)
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

	if (tx_coal)
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}
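/* The bulletin CRC covers everything after the crc field itself (hence
 * the "+ crc_size" skip above), so a torn read - the PF rewriting the
 * board mid-copy - fails the check and is simply retried on a later poll.
 */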
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}
static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}
static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn, NULL);
}
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_change(hwfn);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}
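/* There is no PF -> VF interrupt for bulletin updates; this work item
 * re-queues itself once per second (HZ) and polls the board, so link and
 * MAC changes are observed at that granularity.
 */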