/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}
static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
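/* Every request below follows the same channel sequence: prep the mailbox
 * (which also takes p_iov->mutex), fill the request TLV(s), terminate the
 * list, send, then end the request (which releases the mutex). A minimal,
 * illustrative sketch of that flow - not an additional API:
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_..., sizeof(*req));
 *	... fill request fields, optionally add extended TLVs ...
 *	qed_add_tlv(p_hwfn, &p_iov->offset,
 *		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	... check rc and resp->hdr.status ...
 *	qed_vf_pf_req_end(p_hwfn, rc);
 */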
#define QED_VF_CHANNEL_USLEEP_ITERATIONS	90
#define QED_VF_CHANNEL_USLEEP_DELAY		100
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS	10
#define QED_VF_CHANNEL_MSLEEP_DELAY		25
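/* Polling budget used by qed_send_msg2pf() below: 90 iterations of a 100us
 * busy-wait (9ms) followed by 10 iterations of a 25ms sleep (250ms), i.e.
 * roughly 259ms in total before the request is declared timed out.
 */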
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int iter, rc = 0;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address from a coherent DMA zone. Poll until then.
	 */

	iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
	while (!*done && iter--) {
		udelay(QED_VF_CHANNEL_USLEEP_DELAY);
		dma_rmb();
	}

	iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
	while (!*done && iter--) {
		msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
		dma_rmb();
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
			      struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
				CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}
static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);
	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return _qed_vf_pf_release(p_hwfn, true);
}
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters,
		   p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}
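/* Resource negotiation works as shown in qed_vf_pf_acquire() below: the VF
 * first asks for its maximum, and on a PFVF_STATUS_NO_RESOURCE reply it
 * adopts the PF-recommended amounts via the helper above and re-sends, up to
 * VF_ACQUIRE_THRESH (3) attempts. Illustrative summary only, restating the
 * loop in the function that follows.
 */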
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	u8 retry_cnt = VF_ACQUIRE_THRESH;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar) {
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;
		p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
	}

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

		/* Re-try acquire in case of vf-pf hw channel timeout */
		if (retry_cnt && rc == -EBUSY) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;

	return 0;
}
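/* pfdev_info.bar_size is the log2 of the doorbell bar size in bytes, so the
 * value returned above is the size itself - e.g. a reported bar_size of 17
 * corresponds to a 128KiB doorbell bar. Illustrative note, not part of the
 * original sources.
 */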
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
	struct qed_vf_iov *p_iov;
	u32 reg;
	int rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
	 * value, but there are several incompatibility scenarios where that
	 * would be incorrect and we'd need to override it.
	 */
	if (!p_hwfn->doorbells) {
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 __iomem *)
			    p_hwfn->regview + PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	rc = qed_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (!rc && p_iov->b_doorbell_bar &&
	    !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    (p_hwfn->cdev->num_hwfns > 1)) {
		rc = _qed_vf_pf_release(p_hwfn, false);
		if (rc)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
		rc = qed_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);

	return rc;

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START +	\
				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
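/* With a legacy (pre-fp-hsi) PF, the VF computes its own Rx producer address
 * in qed_vf_pf_rxq_start() further below as:
 *	regview + MSTORM_QZONE_START(cdev) + hw_qid * MSTORM_QZONE_SIZE
 * This comment merely restates that calculation for readability; it is not an
 * additional definition.
 */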
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}
static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}
static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	qed_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
		struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];

		if (p_sb)
			req->sb_addr[i] = p_sb->sb_phys;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
			tlv);
		return false;
	}
}
static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			sp_params.bins[bit / 32] |= 1 << (bit % 32);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}
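/* Each multicast MAC is hashed to an approximate-match bin by
 * qed_mcast_bin_from_mac(); the bin index selects a u32 word (bit / 32) and a
 * bit within it (bit % 32). For example, a bin of 37 sets bit 5 of bins[1].
 * Illustrative note only.
 */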
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_read_coal_resp_tlv *resp;
	struct vfpf_read_coal_req_tlv *req;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
	req->qid = p_cid->rel.queue_id;
	req->is_rx = p_cid->b_is_rx ? 1 : 0;

	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
	resp = &p_iov->pf2vf_reply->read_coal_resp;

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	*p_coal = resp->coal;
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
			      u8 *p_mac)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	struct pfvf_def_resp_tlv *p_resp;
	int rc;

	if (!p_mac)
		return -EINVAL;

	/* clear mailbox and prep header tlv */
	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			       sizeof(*p_req));
	ether_addr_copy(p_req->mac, p_mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Requesting bulletin update for MAC[%pM]\n", p_mac);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
		       u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_update_coalesce *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep header tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));

	req->rx_coal = rx_coal;
	req->tx_coal = tx_coal;
	req->qid = p_cid->rel.queue_id;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
		   rx_coal, tx_coal, req->qid);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		goto exit;

	if (rx_coal)
		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;

	if (tx_coal)
		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return;
	}

	if (sb_id >= PFVF_MAX_SBS_PER_VF) {
		DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
		return;
	}

	p_iov->sbs_info[sb_id] = p_sb;
}
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}
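/* A bulletin copy is accepted only when its version has advanced and the
 * CRC32 computed over everything past the crc field matches shadow.crc;
 * otherwise the snapshot is treated as a torn or partial PF write and
 * discarded until the next poll. Illustrative note only.
 */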
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
	*num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
}

void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
	*num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
}
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}
static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}
static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn, NULL);
}
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}