/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
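
/* Note on the VF -> PF channel used throughout this file:
 *
 * Every request is built in a DMA-coherent mailbox (p_iov->vf2pf_request)
 * as a chain of TLVs and is answered by the PF in a second mailbox
 * (p_iov->pf2vf_reply). Callers follow the same three-step sequence
 * (a sketch of the usage pattern below, not an additional API):
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_..., sizeof(*req));
 *	... fill request, add extended TLVs, add the list-end TLV ...
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	qed_vf_pf_req_end(p_hwfn, rc);
 *
 * qed_vf_pf_prep() takes p_iov->mutex and qed_vf_pf_req_end() releases it,
 * so the two must always be paired around a single request.
 */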
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        void *p_tlv;

        /* This lock is released when we receive PF's response
         * in qed_send_msg2pf().
         * So, qed_vf_pf_prep() and qed_send_msg2pf()
         * must come in sequence.
         */
        mutex_lock(&(p_iov->mutex));

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "preparing to send 0x%04x tlv over vf pf channel\n",
                   type);

        /* Reset Request offset */
        p_iov->offset = (u8 *)p_iov->vf2pf_request;

        /* Clear mailbox - both request and reply */
        memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
        memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

        /* Init type and length */
        p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

        /* Init first tlv header */
        ((struct vfpf_first_tlv *)p_tlv)->reply_address =
            (u64)p_iov->pf2vf_reply_phys;

        return p_tlv;
}
static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
        union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF request status = 0x%x, PF reply status = 0x%x\n",
                   req_status, resp->default_resp.hdr.status);

        mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
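
/* qed_send_msg2pf() pushes the prepared mailbox to the PF over the HW
 * channel: the request's DMA address is written into the VF's USDM zone,
 * the trigger is rung, and the VF then polls the `done' byte that the PF
 * writes back into the reply mailbox; a poll timeout is reported as -EBUSY.
 */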
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
        union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
        struct ustorm_trigger_vf_zone trigger;
        struct ustorm_vf_zone *zone_data;
        int rc = 0, time = 100;

        zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

        /* output tlvs list */
        qed_dp_tlv_list(p_hwfn, p_req);

        /* need to add the END TLV to the message size */
        resp_size += sizeof(struct channel_list_end_tlv);

        /* Send TLVs over HW channel */
        memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
        trigger.vf_pf_msg_valid = 1;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
                   GET_FIELD(p_hwfn->hw_info.concrete_fid,
                             PXP_CONCRETE_FID_PFID),
                   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
                   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
                   &zone_data->non_trigger.vf_pf_msg_addr,
                   *((u32 *)&trigger), &zone_data->trigger);

        REG_WR(p_hwfn,
               (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
               lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

        REG_WR(p_hwfn,
               (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
               upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

        /* The message data must be written first, to prevent trigger before
         * data is written.
         */
        wmb();

        REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

        /* When PF would be done with the response, it would write back to the
         * `done' address. Poll until then.
         */
        while ((!*done) && time) {
                msleep(25);
                time--;
        }

        if (!*done) {
                DP_NOTICE(p_hwfn,
                          "VF <-- PF Timeout [Type %d]\n",
                          p_req->first_tlv.tl.type);
                rc = -EBUSY;
        } else {
                if ((*done != PFVF_STATUS_SUCCESS) &&
                    (*done != PFVF_STATUS_NO_RESOURCE))
                        DP_NOTICE(p_hwfn,
                                  "PF response: %d [Type %d]\n",
                                  *done, p_req->first_tlv.tl.type);
                else
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "PF response: %d [Type %d]\n",
                                   *done, p_req->first_tlv.tl.type);
        }

        return rc;
}
static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
                              struct qed_queue_cid *p_cid)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_qid_tlv *p_qid_tlv;

        /* Only add QIDs for the queue if it was negotiated with PF */
        if (!(p_iov->acquire_resp.pfdev_info.capabilities &
              PFVF_ACQUIRE_CAP_QUEUE_QIDS))
                return;

        p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
                                CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
        p_qid_tlv->qid = p_cid->qid_usage_idx;
}
static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp;
        struct vfpf_first_tlv *req;
        u32 size;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

        if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
                rc = -EAGAIN;

        qed_vf_pf_req_end(p_hwfn, rc);
        if (!b_final)
                return rc;

        p_hwfn->b_int_enabled = 0;

        if (p_iov->vf2pf_request)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(union vfpf_tlvs),
                                  p_iov->vf2pf_request,
                                  p_iov->vf2pf_request_phys);
        if (p_iov->pf2vf_reply)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(union pfvf_tlvs),
                                  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

        if (p_iov->bulletin.p_virt) {
                size = sizeof(struct qed_bulletin_content);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  size,
                                  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
        }

        kfree(p_hwfn->vf_iov_info);
        p_hwfn->vf_iov_info = NULL;

        return rc;
}
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
        return _qed_vf_pf_release(p_hwfn, true);
}
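
/* Resource acquisition: the VF first asks for the maximum it could use
 * (QED_MAX_VF_CHAINS_PER_PF queues/SBs plus default CIDs and filters). If
 * the PF answers PFVF_STATUS_NO_RESOURCE, the request is shrunk to the PF's
 * recommended amounts and re-sent, up to VF_ACQUIRE_THRESH attempts. A PF
 * that predates the current fastpath HSI is detected here and marked via
 * b_pre_fp_hsi, so that queue producers/doorbells are derived locally later.
 */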
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
                                          struct vf_pf_resc_request *p_req,
                                          struct pf_vf_resc *p_resp)
{
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
                   p_req->num_rxqs,
                   p_resp->num_rxqs,
                   p_req->num_txqs,
                   p_resp->num_txqs,
                   p_req->num_sbs,
                   p_resp->num_sbs,
                   p_req->num_mac_filters,
                   p_resp->num_mac_filters,
                   p_req->num_vlan_filters,
                   p_resp->num_vlan_filters,
                   p_req->num_mc_filters,
                   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

        /* humble our request */
        p_req->num_txqs = p_resp->num_txqs;
        p_req->num_rxqs = p_resp->num_rxqs;
        p_req->num_sbs = p_resp->num_sbs;
        p_req->num_mac_filters = p_resp->num_mac_filters;
        p_req->num_vlan_filters = p_resp->num_vlan_filters;
        p_req->num_mc_filters = p_resp->num_mc_filters;
        p_req->num_cids = p_resp->num_cids;
}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
        struct vf_pf_resc_request *p_resc;
        u8 retry_cnt = VF_ACQUIRE_THRESH;
        bool resources_acquired = false;
        struct vfpf_acquire_tlv *req;
        int rc = 0, attempts = 0;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
        p_resc = &req->resc_request;

        /* starting filling the request */
        req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

        p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
        p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
        p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
        p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
        p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

        req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
        req->vfdev_info.fw_major = FW_MAJOR_VERSION;
        req->vfdev_info.fw_minor = FW_MINOR_VERSION;
        req->vfdev_info.fw_revision = FW_REVISION_VERSION;
        req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
        req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
        req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

        /* Fill capability field with any non-deprecated config we support */
        req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

        /* If we've mapped the doorbell bar, try using queue qids */
        if (p_iov->b_doorbell_bar) {
                req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
                                                VFPF_ACQUIRE_CAP_QUEUE_QIDS;
                p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
        }

        /* pf 2 vf bulletin board address */
        req->bulletin_addr = p_iov->bulletin.phys;
        req->bulletin_size = p_iov->bulletin.size;

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        while (!resources_acquired) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV, "attempting to acquire resources\n");

                /* Clear response buffer, as this might be a re-send */
                memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

                /* send acquire request */
                rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

                /* Re-try acquire in case of vf-pf hw channel timeout */
                if (retry_cnt && rc == -EBUSY) {
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "VF retrying to acquire due to VPC timeout\n");
                        retry_cnt--;
                        continue;
                }

                if (rc)
                        goto exit;

                /* copy acquire response from buffer to p_hwfn */
                memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

                attempts++;

                if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
                        /* PF agrees to allocate our resources */
                        if (!(resp->pfdev_info.capabilities &
                              PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
                                /* It's possible legacy PF mistakenly accepted;
                                 * but we don't care - simply mark it as
                                 * legacy and continue.
                                 */
                                req->vfdev_info.capabilities |=
                                    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
                        }
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
                        resources_acquired = true;
                } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
                           attempts < VF_ACQUIRE_THRESH) {
                        qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
                                                      &resp->resc);
                } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
                        if (pfdev_info->major_fp_hsi &&
                            (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
                                DP_NOTICE(p_hwfn,
                                          "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
                                          pfdev_info->major_fp_hsi,
                                          pfdev_info->minor_fp_hsi,
                                          ETH_HSI_VER_MAJOR,
                                          ETH_HSI_VER_MINOR,
                                          pfdev_info->major_fp_hsi);
                                rc = -EINVAL;
                                goto exit;
                        }

                        if (!pfdev_info->major_fp_hsi) {
                                if (req->vfdev_info.capabilities &
                                    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
                                        DP_NOTICE(p_hwfn,
                                                  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
                                        rc = -EINVAL;
                                        goto exit;
                                } else {
                                        DP_INFO(p_hwfn,
                                                "PF is old - try re-acquire to see if it supports FW-version override\n");
                                        req->vfdev_info.capabilities |=
                                            VFPF_ACQUIRE_CAP_PRE_FP_HSI;
                                        continue;
                                }
                        }

                        /* If PF/VF are using same Major, PF must have had
                         * its reasons. Simply fail.
                         */
                        DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
                        rc = -EINVAL;
                        goto exit;
                } else {
                        DP_ERR(p_hwfn,
                               "PF returned error %d to VF acquisition request\n",
                               resp->hdr.status);
                        rc = -EAGAIN;
                        goto exit;
                }
        }

        /* Mark the PF as legacy, if needed */
        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
                p_iov->b_pre_fp_hsi = true;

        /* In case PF doesn't support multi-queue Tx, update the number of
         * CIDs to reflect the number of queues [older PFs didn't fill that
         * field].
         */
        if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
                resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

        /* Update bulletin board size with response from PF */
        p_iov->bulletin.size = resp->bulletin_size;

        /* get HW info */
        p_hwfn->cdev->type = resp->pfdev_info.dev_type;
        p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

        p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

        /* Learn of the possibility of CMT */
        if (IS_LEAD_HWFN(p_hwfn)) {
                if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
                        DP_NOTICE(p_hwfn, "100g VF\n");
                        p_hwfn->cdev->num_hwfns = 2;
                }
        }

        if (!p_iov->b_pre_fp_hsi &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
                        ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
        u32 bar_size;

        /* Regview size is fixed */
        if (bar_id == BAR_ID_0)
                return 1 << 17;

        /* Doorbell is received from PF */
        bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
        if (bar_size)
                return 1 << bar_size;

        return 0;
}
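
/* qed_vf_hw_prepare() runs once per hw-function before any other VF->PF
 * request: it reads the opaque/concrete FIDs from BAR0, allocates the
 * request/reply mailboxes and the bulletin board, and then performs the
 * initial ACQUIRE. Doorbell handling is the delicate part - see the
 * comments inside regarding mapped doorbell bars vs. the BAR0 DQ window.
 */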
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
        struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
        struct qed_vf_iov *p_iov;
        u32 reg;
        int rc;

        /* Set number of hwfns - might be overridden once leading hwfn learns
         * actual configuration from PF.
         */
        if (IS_LEAD_HWFN(p_hwfn))
                p_hwfn->cdev->num_hwfns = 1;

        reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

        reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

        /* Allocate vf sriov info */
        p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
        if (!p_iov)
                return -ENOMEM;

        /* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
         * value, but there are several incompatibility scenarios where that
         * would be incorrect and we'd need to override it.
         */
        if (!p_hwfn->doorbells) {
                p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
                                    PXP_VF_BAR0_START_DQ;
        } else if (p_hwfn == p_lead) {
                /* For leading hw-function, value is always correct, but need
                 * to handle scenario where legacy PF would not support 100g
                 * mapped bars later.
                 */
                p_iov->b_doorbell_bar = true;
        } else {
                /* here, value would be correct ONLY if the leading hwfn
                 * received indication that mapped-bars are supported.
                 */
                if (p_lead->vf_iov_info->b_doorbell_bar)
                        p_iov->b_doorbell_bar = true;
                else
                        p_hwfn->doorbells = (u8 __iomem *)
                            p_hwfn->regview + PXP_VF_BAR0_START_DQ;
        }

        /* Allocate vf2pf msg */
        p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                  sizeof(union vfpf_tlvs),
                                                  &p_iov->vf2pf_request_phys,
                                                  GFP_KERNEL);
        if (!p_iov->vf2pf_request)
                goto free_p_iov;

        p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                sizeof(union pfvf_tlvs),
                                                &p_iov->pf2vf_reply_phys,
                                                GFP_KERNEL);
        if (!p_iov->pf2vf_reply)
                goto free_vf2pf_request;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
                   p_iov->vf2pf_request,
                   (u64) p_iov->vf2pf_request_phys,
                   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

        /* Allocate Bulletin board */
        p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
        p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                    p_iov->bulletin.size,
                                                    &p_iov->bulletin.phys,
                                                    GFP_KERNEL);
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
                   p_iov->bulletin.p_virt,
                   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

        mutex_init(&p_iov->mutex);

        p_hwfn->vf_iov_info = p_iov;

        p_hwfn->hw_info.personality = QED_PCI_ETH;

        rc = qed_vf_pf_acquire(p_hwfn);

        /* If VF is 100g using a mapped bar and PF is too old to support that,
         * acquisition would succeed - but the VF would have no way knowing
         * the size of the doorbell bar configured in HW and thus will not
         * know how to split it for 2nd hw-function.
         * In this case we re-try without the indication of the mapped
         * doorbell.
         */
        if (!rc && p_iov->b_doorbell_bar &&
            !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
            (p_hwfn->cdev->num_hwfns > 1)) {
                rc = _qed_vf_pf_release(p_hwfn, false);
                if (rc)
                        return rc;

                p_iov->b_doorbell_bar = false;
                p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
                                    PXP_VF_BAR0_START_DQ;
                rc = qed_vf_pf_acquire(p_hwfn);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
                   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

        return rc;

free_vf2pf_request:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(union vfpf_tlvs),
                          p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
        kfree(p_iov);

        return -ENOMEM;
}
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                           struct qed_tunn_update_type *p_src,
                           enum qed_tunn_mode mask, u8 *p_cls)
{
        if (p_src->b_update_mode) {
                p_req->tun_mode_update_mask |= BIT(mask);

                if (p_src->b_mode_enabled)
                        p_req->tunn_mode |= BIT(mask);
        }

        *p_cls = p_src->tun_cls;
}
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                         struct qed_tunn_update_type *p_src,
                         enum qed_tunn_mode mask,
                         u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
                         u8 *p_update_port, u16 *p_udp_port)
{
        if (p_port->b_update_port) {
                *p_update_port = 1;
                *p_udp_port = p_port->port;
        }

        __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
        if (p_tun->vxlan.b_mode_enabled)
                p_tun->vxlan.b_update_mode = true;
        if (p_tun->l2_geneve.b_mode_enabled)
                p_tun->l2_geneve.b_update_mode = true;
        if (p_tun->ip_geneve.b_mode_enabled)
                p_tun->ip_geneve.b_update_mode = true;
        if (p_tun->l2_gre.b_mode_enabled)
                p_tun->l2_gre.b_update_mode = true;
        if (p_tun->ip_gre.b_mode_enabled)
                p_tun->ip_gre.b_update_mode = true;

        p_tun->b_update_rx_cls = true;
        p_tun->b_update_tx_cls = true;
}
static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
                           u16 feature_mask, u8 tunn_mode,
                           u8 tunn_cls, enum qed_tunn_mode val)
{
        if (feature_mask & BIT(val)) {
                p_tun->b_mode_enabled = tunn_mode;
                p_tun->tun_cls = tunn_cls;
        } else {
                p_tun->b_mode_enabled = false;
        }
}
static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
                                     struct qed_tunnel_info *p_tun,
                                     struct pfvf_update_tunn_param_tlv *p_resp)
{
        /* Update mode and classes provided by PF */
        u16 feat_mask = p_resp->tunn_feature_mask;

        __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
                                   p_resp->vxlan_mode, p_resp->vxlan_clss,
                                   QED_MODE_VXLAN_TUNN);
        __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
                                   p_resp->l2geneve_mode,
                                   p_resp->l2geneve_clss,
                                   QED_MODE_L2GENEVE_TUNN);
        __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
                                   p_resp->ipgeneve_mode,
                                   p_resp->ipgeneve_clss,
                                   QED_MODE_IPGENEVE_TUNN);
        __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
                                   p_resp->l2gre_mode, p_resp->l2gre_clss,
                                   QED_MODE_L2GRE_TUNN);
        __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
                                   p_resp->ipgre_mode, p_resp->ipgre_clss,
                                   QED_MODE_IPGRE_TUNN);
        p_tun->geneve_port.port = p_resp->geneve_udp_port;
        p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
                   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
                   p_tun->ip_geneve.b_mode_enabled,
                   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
                                  struct qed_tunnel_info *p_src)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_update_tunn_param_tlv *p_resp;
        struct vfpf_update_tunn_param_tlv *p_req;
        int rc;

        p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
                               sizeof(*p_req));

        if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
                p_req->update_tun_cls = 1;

        qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
                                 &p_req->vxlan_clss, &p_src->vxlan_port,
                                 &p_req->update_vxlan_port,
                                 &p_req->vxlan_port);
        qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
                                 QED_MODE_L2GENEVE_TUNN,
                                 &p_req->l2geneve_clss, &p_src->geneve_port,
                                 &p_req->update_geneve_port,
                                 &p_req->geneve_port);
        __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
                                   QED_MODE_IPGENEVE_TUNN,
                                   &p_req->ipgeneve_clss);
        __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
                                   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
        __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
                                   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
        rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

        if (rc)
                goto exit;

        if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Failed to update tunnel parameters\n");
                rc = -EINVAL;
                goto exit;
        }

        qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
        qed_vf_pf_req_end(p_hwfn, rc);
        return rc;
}
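
/* Rx/Tx queue start: with a modern PF the producer/doorbell offsets are
 * returned in the START_RXQ/START_TXQ response; with a legacy PF
 * (b_pre_fp_hsi) the VF computes the Rx producer address itself from the
 * MSTORM queue zone and derives the Tx doorbell from the legacy CID map.
 */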
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
                    struct qed_queue_cid *p_cid,
                    u16 bd_max_bytes,
                    dma_addr_t bd_chain_phys_addr,
                    dma_addr_t cqe_pbl_addr,
                    u16 cqe_pbl_size, void __iomem **pp_prod)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_start_queue_resp_tlv *resp;
        struct vfpf_start_rxq_tlv *req;
        u8 rx_qid = p_cid->rel.queue_id;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

        req->rx_qid = rx_qid;
        req->cqe_pbl_addr = cqe_pbl_addr;
        req->cqe_pbl_size = cqe_pbl_size;
        req->rxq_addr = bd_chain_phys_addr;
        req->hw_sb = p_cid->sb_igu_id;
        req->sb_index = p_cid->sb_idx;
        req->bd_max_bytes = bd_max_bytes;
        req->stat_id = -1;

        /* If PF is legacy, we'll need to calculate producers ourselves
         * as well as clean them.
         */
        if (p_iov->b_pre_fp_hsi) {
                u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
                u32 init_prod_val = 0;

                *pp_prod = (u8 __iomem *)
                    p_hwfn->regview +
                    MSTORM_QZONE_START(p_hwfn->cdev) +
                    hw_qid * MSTORM_QZONE_SIZE;

                /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
                __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                                  (u32 *)(&init_prod_val));
        }

        qed_vf_pf_add_qid(p_hwfn, p_cid);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->queue_start;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

        /* Learn the address of the producer from the response */
        if (!p_iov->b_pre_fp_hsi) {
                u32 init_prod_val = 0;

                *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
                           rx_qid, *pp_prod, resp->offset);

                /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
                __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                                  (u32 *)&init_prod_val);
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
                       struct qed_queue_cid *p_cid, bool cqe_completion)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_stop_rxqs_tlv *req;
        struct pfvf_def_resp_tlv *resp;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

        req->rx_qid = p_cid->rel.queue_id;
        req->num_rxqs = 1;
        req->cqe_completion = cqe_completion;

        qed_vf_pf_add_qid(p_hwfn, p_cid);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
                    struct qed_queue_cid *p_cid,
                    dma_addr_t pbl_addr,
                    u16 pbl_size, void __iomem **pp_doorbell)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_start_queue_resp_tlv *resp;
        struct vfpf_start_txq_tlv *req;
        u16 qid = p_cid->rel.queue_id;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

        req->tx_qid = qid;

        /* Tx */
        req->pbl_addr = pbl_addr;
        req->pbl_size = pbl_size;
        req->hw_sb = p_cid->sb_igu_id;
        req->sb_index = p_cid->sb_idx;

        qed_vf_pf_add_qid(p_hwfn, p_cid);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->queue_start;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

        /* Modern PFs provide the actual offsets, while legacy
         * provided only the queue id.
         */
        if (!p_iov->b_pre_fp_hsi) {
                *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
        } else {
                u8 cid = p_iov->acquire_resp.resc.cid[qid];

                *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
                               qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
                   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_stop_txqs_tlv *req;
        struct pfvf_def_resp_tlv *resp;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

        req->tx_qid = p_cid->rel.queue_id;
        req->num_txqs = 1;

        qed_vf_pf_add_qid(p_hwfn, p_cid);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
                          u8 vport_id,
                          u16 mtu,
                          u8 inner_vlan_removal,
                          enum qed_tpa_mode tpa_mode,
                          u8 max_buffers_per_cqe, u8 only_untagged)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_vport_start_tlv *req;
        struct pfvf_def_resp_tlv *resp;
        int rc, i;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

        req->mtu = mtu;
        req->vport_id = vport_id;
        req->inner_vlan_removal = inner_vlan_removal;
        req->tpa_mode = tpa_mode;
        req->max_buffers_per_cqe = max_buffers_per_cqe;
        req->only_untagged = only_untagged;

        /* status blocks */
        for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
                struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

                if (p_sb)
                        req->sb_addr[i] = p_sb->sb_phys;
        }

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
                       sizeof(struct vfpf_first_tlv));

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
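
/* Vport-update requests are built as an optional chain of extended TLVs
 * (activate, tx-switching, mcast bins, accept flags, RSS, accept-any-vlan).
 * Each TLV added to the request also adds one pfvf_def_resp_tlv to the
 * expected reply, which is why resp_size grows alongside the request, and
 * the per-TLV status is looked up afterwards in the reply chain.
 */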
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
                                  struct qed_sp_vport_update_params *p_data,
                                  u16 tlv)
{
        switch (tlv) {
        case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
                return !!(p_data->update_vport_active_rx_flg ||
                          p_data->update_vport_active_tx_flg);
        case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
                return !!p_data->update_tx_switching_flg;
        case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
                return !!p_data->update_inner_vlan_removal_flg;
        case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
                return !!p_data->update_accept_any_vlan_flg;
        case CHANNEL_TLV_VPORT_UPDATE_MCAST:
                return !!p_data->update_approx_mcast_flg;
        case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
                return !!(p_data->accept_flags.update_rx_mode_config ||
                          p_data->accept_flags.update_tx_mode_config);
        case CHANNEL_TLV_VPORT_UPDATE_RSS:
                return !!p_data->rss_params;
        case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
                return !!p_data->sge_tpa_params;
        default:
                DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
                        tlv);
                return false;
        }
}
static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
                                  struct qed_sp_vport_update_params *p_data)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *p_resp;
        u16 tlv;

        for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
             tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
                if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
                        continue;

                p_resp = (struct pfvf_def_resp_tlv *)
                         qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
                                                  tlv);
                if (p_resp && p_resp->hdr.status)
                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                                   "TLV[%d] Configuration %s\n",
                                   tlv,
                                   (p_resp && p_resp->hdr.status) ? "succeeded"
                                                                  : "failed");
        }
}
*p_hwfn
,
1077 struct qed_sp_vport_update_params
*p_params
)
1079 struct qed_vf_iov
*p_iov
= p_hwfn
->vf_iov_info
;
1080 struct vfpf_vport_update_tlv
*req
;
1081 struct pfvf_def_resp_tlv
*resp
;
1082 u8 update_rx
, update_tx
;
1087 resp
= &p_iov
->pf2vf_reply
->default_resp
;
1088 resp_size
= sizeof(*resp
);
1090 update_rx
= p_params
->update_vport_active_rx_flg
;
1091 update_tx
= p_params
->update_vport_active_tx_flg
;
1093 /* clear mailbox and prep header tlv */
1094 qed_vf_pf_prep(p_hwfn
, CHANNEL_TLV_VPORT_UPDATE
, sizeof(*req
));
1096 /* Prepare extended tlvs */
1097 if (update_rx
|| update_tx
) {
1098 struct vfpf_vport_update_activate_tlv
*p_act_tlv
;
1100 size
= sizeof(struct vfpf_vport_update_activate_tlv
);
1101 p_act_tlv
= qed_add_tlv(p_hwfn
, &p_iov
->offset
,
1102 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE
,
1104 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1107 p_act_tlv
->update_rx
= update_rx
;
1108 p_act_tlv
->active_rx
= p_params
->vport_active_rx_flg
;
1112 p_act_tlv
->update_tx
= update_tx
;
1113 p_act_tlv
->active_tx
= p_params
->vport_active_tx_flg
;
1117 if (p_params
->update_tx_switching_flg
) {
1118 struct vfpf_vport_update_tx_switch_tlv
*p_tx_switch_tlv
;
1120 size
= sizeof(struct vfpf_vport_update_tx_switch_tlv
);
1121 tlv
= CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH
;
1122 p_tx_switch_tlv
= qed_add_tlv(p_hwfn
, &p_iov
->offset
,
1124 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1126 p_tx_switch_tlv
->tx_switching
= p_params
->tx_switching_flg
;
1129 if (p_params
->update_approx_mcast_flg
) {
1130 struct vfpf_vport_update_mcast_bin_tlv
*p_mcast_tlv
;
1132 size
= sizeof(struct vfpf_vport_update_mcast_bin_tlv
);
1133 p_mcast_tlv
= qed_add_tlv(p_hwfn
, &p_iov
->offset
,
1134 CHANNEL_TLV_VPORT_UPDATE_MCAST
, size
);
1135 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1137 memcpy(p_mcast_tlv
->bins
, p_params
->bins
,
1138 sizeof(u32
) * ETH_MULTICAST_MAC_BINS_IN_REGS
);
1141 update_rx
= p_params
->accept_flags
.update_rx_mode_config
;
1142 update_tx
= p_params
->accept_flags
.update_tx_mode_config
;
1144 if (update_rx
|| update_tx
) {
1145 struct vfpf_vport_update_accept_param_tlv
*p_accept_tlv
;
1147 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM
;
1148 size
= sizeof(struct vfpf_vport_update_accept_param_tlv
);
1149 p_accept_tlv
= qed_add_tlv(p_hwfn
, &p_iov
->offset
, tlv
, size
);
1150 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1153 p_accept_tlv
->update_rx_mode
= update_rx
;
1154 p_accept_tlv
->rx_accept_filter
=
1155 p_params
->accept_flags
.rx_accept_filter
;
1159 p_accept_tlv
->update_tx_mode
= update_tx
;
1160 p_accept_tlv
->tx_accept_filter
=
1161 p_params
->accept_flags
.tx_accept_filter
;
1165 if (p_params
->rss_params
) {
1166 struct qed_rss_params
*rss_params
= p_params
->rss_params
;
1167 struct vfpf_vport_update_rss_tlv
*p_rss_tlv
;
1170 size
= sizeof(struct vfpf_vport_update_rss_tlv
);
1171 p_rss_tlv
= qed_add_tlv(p_hwfn
,
1173 CHANNEL_TLV_VPORT_UPDATE_RSS
, size
);
1174 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1176 if (rss_params
->update_rss_config
)
1177 p_rss_tlv
->update_rss_flags
|=
1178 VFPF_UPDATE_RSS_CONFIG_FLAG
;
1179 if (rss_params
->update_rss_capabilities
)
1180 p_rss_tlv
->update_rss_flags
|=
1181 VFPF_UPDATE_RSS_CAPS_FLAG
;
1182 if (rss_params
->update_rss_ind_table
)
1183 p_rss_tlv
->update_rss_flags
|=
1184 VFPF_UPDATE_RSS_IND_TABLE_FLAG
;
1185 if (rss_params
->update_rss_key
)
1186 p_rss_tlv
->update_rss_flags
|= VFPF_UPDATE_RSS_KEY_FLAG
;
1188 p_rss_tlv
->rss_enable
= rss_params
->rss_enable
;
1189 p_rss_tlv
->rss_caps
= rss_params
->rss_caps
;
1190 p_rss_tlv
->rss_table_size_log
= rss_params
->rss_table_size_log
;
1192 table_size
= min_t(int, T_ETH_INDIRECTION_TABLE_SIZE
,
1193 1 << p_rss_tlv
->rss_table_size_log
);
1194 for (i
= 0; i
< table_size
; i
++) {
1195 struct qed_queue_cid
*p_queue
;
1197 p_queue
= rss_params
->rss_ind_table
[i
];
1198 p_rss_tlv
->rss_ind_table
[i
] = p_queue
->rel
.queue_id
;
1200 memcpy(p_rss_tlv
->rss_key
, rss_params
->rss_key
,
1201 sizeof(rss_params
->rss_key
));
1204 if (p_params
->update_accept_any_vlan_flg
) {
1205 struct vfpf_vport_update_accept_any_vlan_tlv
*p_any_vlan_tlv
;
1207 size
= sizeof(struct vfpf_vport_update_accept_any_vlan_tlv
);
1208 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN
;
1209 p_any_vlan_tlv
= qed_add_tlv(p_hwfn
, &p_iov
->offset
, tlv
, size
);
1211 resp_size
+= sizeof(struct pfvf_def_resp_tlv
);
1212 p_any_vlan_tlv
->accept_any_vlan
= p_params
->accept_any_vlan
;
1213 p_any_vlan_tlv
->update_accept_any_vlan_flg
=
1214 p_params
->update_accept_any_vlan_flg
;
1217 /* add list termination tlv */
1218 qed_add_tlv(p_hwfn
, &p_iov
->offset
,
1219 CHANNEL_TLV_LIST_END
, sizeof(struct channel_list_end_tlv
));
1221 rc
= qed_send_msg2pf(p_hwfn
, &resp
->hdr
.status
, resp_size
);
1225 if (resp
->hdr
.status
!= PFVF_STATUS_SUCCESS
) {
1230 qed_vf_handle_vp_update_tlvs_resp(p_hwfn
, p_params
);
1233 qed_vf_pf_req_end(p_hwfn
, rc
);
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp;
        struct vfpf_first_tlv *req;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EAGAIN;
                goto exit;
        }

        p_hwfn->b_int_enabled = 0;

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
                            struct qed_filter_mcast *p_filter_cmd)
{
        struct qed_sp_vport_update_params sp_params;
        int i;

        memset(&sp_params, 0, sizeof(sp_params));
        sp_params.update_approx_mcast_flg = 1;

        if (p_filter_cmd->opcode == QED_FILTER_ADD) {
                for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
                        u32 bit;

                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
                        sp_params.bins[bit / 32] |= 1 << (bit % 32);
                }
        }

        qed_vf_pf_vport_update(p_hwfn, &sp_params);
}
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
                           struct qed_filter_ucast *p_ucast)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_ucast_filter_tlv *req;
        struct pfvf_def_resp_tlv *resp;
        int rc;

        /* clear mailbox and prep first tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
        req->opcode = (u8) p_ucast->opcode;
        req->type = (u8) p_ucast->type;
        memcpy(req->mac, p_ucast->mac, ETH_ALEN);
        req->vlan = p_ucast->vlan;

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EAGAIN;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
                       sizeof(struct vfpf_first_tlv));

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset,
                    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                rc = -EINVAL;
                goto exit;
        }

exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
                           u16 *p_coal, struct qed_queue_cid *p_cid)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_read_coal_resp_tlv *resp;
        struct vfpf_read_coal_req_tlv *req;
        int rc;

        /* clear mailbox and prep header tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
        req->qid = p_cid->rel.queue_id;
        req->is_rx = p_cid->b_is_rx ? 1 : 0;

        qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));
        resp = &p_iov->pf2vf_reply->read_coal_resp;

        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS)
                goto exit;

        *p_coal = resp->coal;
exit:
        qed_vf_pf_req_end(p_hwfn, rc);

        return rc;
}
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
                              u8 *p_mac)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_bulletin_update_mac_tlv *p_req;
        struct pfvf_def_resp_tlv *p_resp;
        int rc;

        if (!p_mac)
                return -EINVAL;

        /* clear mailbox and prep header tlv */
        p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
                               sizeof(*p_req));
        ether_addr_copy(p_req->mac, p_mac);
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Requesting bulletin update for MAC[%pM]\n", p_mac);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        p_resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
        qed_vf_pf_req_end(p_hwfn, rc);
        return rc;
}
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
                       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct vfpf_update_coalesce *req;
        struct pfvf_def_resp_tlv *resp;
        int rc;

        /* clear mailbox and prep header tlv */
        req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

        req->rx_coal = rx_coal;
        req->tx_coal = tx_coal;
        req->qid = p_cid->rel.queue_id;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
                   rx_coal, tx_coal, req->qid);

        /* add list termination tlv */
        qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        resp = &p_iov->pf2vf_reply->default_resp;
        rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
        if (rc)
                goto exit;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS)
                goto exit;

        if (rx_coal)
                p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

        if (tx_coal)
                p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
        qed_vf_pf_req_end(p_hwfn, rc);
        return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

        if (!p_iov) {
                DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
                return 0;
        }

        return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
                        u16 sb_id, struct qed_sb_info *p_sb)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

        if (!p_iov) {
                DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
                return;
        }

        if (sb_id >= PFVF_MAX_SBS_PER_VF) {
                DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
                return;
        }

        p_iov->sbs_info[sb_id] = p_sb;
}
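
/* The bulletin board is written by the PF and only polled by the VF, so
 * qed_vf_read_bulletin() copies it into a local shadow first, skips the
 * update if the version is unchanged, and validates the CRC32 over
 * everything past the crc field before accepting the new content.
 */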
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct qed_bulletin_content shadow;
        u32 crc, crc_size;

        crc_size = sizeof(p_iov->bulletin.p_virt->crc);
        *p_change = 0;

        /* Need to guarantee PF is not in the middle of writing it */
        memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

        /* If version did not update, no need to do anything */
        if (shadow.version == p_iov->bulletin_shadow.version)
                return 0;

        /* Verify the bulletin we see is valid */
        crc = crc32(0, (u8 *)&shadow + crc_size,
                    p_iov->bulletin.size - crc_size);
        if (crc != shadow.crc)
                return -EAGAIN;

        /* Set the shadow bulletin and process it */
        memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Read a bulletin update %08x\n", shadow.version);

        *p_change = 1;

        return 0;
}
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
                              struct qed_mcp_link_params *p_params,
                              struct qed_bulletin_content *p_bulletin)
{
        memset(p_params, 0, sizeof(*p_params));

        p_params->speed.autoneg = p_bulletin->req_autoneg;
        p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
        p_params->speed.forced_speed = p_bulletin->req_forced_speed;
        p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
        p_params->pause.forced_rx = p_bulletin->req_forced_rx;
        p_params->pause.forced_tx = p_bulletin->req_forced_tx;
        p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
                            struct qed_mcp_link_params *params)
{
        __qed_vf_get_link_params(p_hwfn, params,
                                 &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
                             struct qed_mcp_link_state *p_link,
                             struct qed_bulletin_content *p_bulletin)
{
        memset(p_link, 0, sizeof(*p_link));

        p_link->link_up = p_bulletin->link_up;
        p_link->speed = p_bulletin->speed;
        p_link->full_duplex = p_bulletin->full_duplex;
        p_link->an = p_bulletin->autoneg;
        p_link->an_complete = p_bulletin->autoneg_complete;
        p_link->parallel_detection = p_bulletin->parallel_detection;
        p_link->pfc_enabled = p_bulletin->pfc_enabled;
        p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
        p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
        p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
        p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
        p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
                           struct qed_mcp_link_state *link)
{
        __qed_vf_get_link_state(p_hwfn, link,
                                &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
                            struct qed_mcp_link_capabilities *p_link_caps,
                            struct qed_bulletin_content *p_bulletin)
{
        memset(p_link_caps, 0, sizeof(*p_link_caps));
        p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
                          struct qed_mcp_link_capabilities *p_link_caps)
{
        __qed_vf_get_link_caps(p_hwfn, p_link_caps,
                               &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
        *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
        *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
        *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
        memcpy(port_mac,
               p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
        struct qed_vf_iov *p_vf;

        p_vf = p_hwfn->vf_iov_info;
        *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
        struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

        *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
        struct qed_bulletin_content *bulletin;

        bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
        if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
                return true;

        /* Forbid VF from changing a MAC enforced by PF */
        if (ether_addr_equal(bulletin->mac, mac))
                return false;

        return false;
}
static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
                                           u8 *dst_mac, u8 *p_is_forced)
{
        struct qed_bulletin_content *bulletin;

        bulletin = &hwfn->vf_iov_info->bulletin_shadow;

        if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
                if (p_is_forced)
                        *p_is_forced = 1;
        } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
                if (p_is_forced)
                        *p_is_forced = 0;
        } else {
                return false;
        }

        ether_addr_copy(dst_mac, bulletin->mac);

        return true;
}
static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
                              u16 *p_vxlan_port, u16 *p_geneve_port)
{
        struct qed_bulletin_content *p_bulletin;

        p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

        *p_vxlan_port = p_bulletin->vxlan_udp_port;
        *p_geneve_port = p_bulletin->geneve_udp_port;
}
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
                           u16 *fw_major, u16 *fw_minor,
                           u16 *fw_rev, u16 *fw_eng)
{
        struct pf_vf_pfdev_info *info;

        info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

        *fw_major = info->fw_major;
        *fw_minor = info->fw_minor;
        *fw_rev = info->fw_rev;
        *fw_eng = info->fw_eng;
}
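
/* A bulletin change is propagated to the protocol driver through the
 * qed_eth_cb_ops callbacks: a forced/suggested MAC via force_mac(), UDP
 * tunnel ports via ports_update(), and the link state via qed_link_update().
 */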
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
        struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
        u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
        void *cookie = hwfn->cdev->ops_cookie;
        u16 vxlan_port, geneve_port;

        qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
        is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
                                                      &is_mac_forced);
        if (is_mac_exist && cookie)
                ops->force_mac(cookie, mac, !!is_mac_forced);

        ops->ports_update(cookie, vxlan_port, geneve_port);

        /* Always update link configuration according to bulletin */
        qed_link_update(hwfn, NULL);
}
void qed_iov_vf_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             iov_task.work);
        u8 change = 0;

        if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
                return;

        /* Handle bulletin board changes */
        qed_vf_read_bulletin(hwfn, &change);
        if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
                               &hwfn->iov_task_flags))
                change = 1;
        if (change)
                qed_handle_bulletin_change(hwfn);

        /* As VF is polling bulletin board, need to constantly re-schedule */
        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}