/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
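/* Map a hardware CoS queue ID back to the TC index currently assigned to it
 * through bp->tc_to_qidx[].  Returns the TC number, or -EINVAL if the queue
 * ID is not in use by any TC.
 */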
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
	int i, j;

	for (i = 0; i < bp->max_tc; i++) {
		if (bp->q_info[i].queue_id == queue_id) {
			for (j = 0; j < bp->max_tc; j++) {
				if (bp->tc_to_qidx[j] == i)
					return j;
			}
		}
	}
	return -EINVAL;
}

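/* Program the priority to CoS queue mapping in firmware.  Each of the eight
 * IEEE priorities is resolved through ets->prio_tc[] and bp->tc_to_qidx[] to
 * a hardware queue ID, applied bidirectionally on the inner VLAN priority.
 */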
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input *req;
	u8 *pri2cos;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				 QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req->pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		u8 qidx;

		req->enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
		pri2cos[i] = bp->q_info[qidx].queue_id;
	}
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp;
	struct hwrm_queue_pri2cos_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];
			int tc;

			tc = bnxt_queue_to_tc(bp, queue_id);
			if (tc >= 0)
				ets->prio_tc[i] = tc;
		}
	}
	hwrm_req_drop(bp, req);
	return rc;
}

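/* Program per-CoS-queue scheduling: strict priority for TCs configured as
 * IEEE_8021QAZ_TSA_STRICT, otherwise ETS with the requested bandwidth
 * weight (mirrored into min_bw in percent for older firmware).
 */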
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input *req;
	struct bnxt_cos2bw_cfg cos2bw;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
	if (rc)
		return rc;

	for (i = 0; i < max_tc; i++) {
		u8 qidx = bp->tc_to_qidx[i];

		req->enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);

		memset(&cos2bw, 0, sizeof(cos2bw));
		cos2bw.queue_id = bp->q_info[qidx].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
			/* older firmware requires min_bw to be set to the
			 * same weight value in percent.
			 */
			cos2bw.min_bw =
				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
					    BW_VALUE_UNIT_PERCENT1_100);
		}
		if (i == 0) {
			req->queue_id0 = cos2bw.queue_id;
			req->queue_id0_min_bw = cos2bw.min_bw;
			req->queue_id0_max_bw = cos2bw.max_bw;
			req->queue_id0_tsa_assign = cos2bw.tsa;
			req->queue_id0_pri_lvl = cos2bw.pri_lvl;
			req->queue_id0_bw_weight = cos2bw.bw_weight;
		} else {
			memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg));
		}
	}
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp;
	struct hwrm_queue_cos2bw_qcfg_input *req;
	struct bnxt_cos2bw_cfg cos2bw;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}

	for (i = 0; i < bp->max_tc; i++) {
		int tc;

		if (i == 0) {
			cos2bw.queue_id = resp->queue_id0;
			cos2bw.min_bw = resp->queue_id0_min_bw;
			cos2bw.max_bw = resp->queue_id0_max_bw;
			cos2bw.tsa = resp->queue_id0_tsa_assign;
			cos2bw.pri_lvl = resp->queue_id0_pri_lvl;
			cos2bw.bw_weight = resp->queue_id0_bw_weight;
		} else {
			memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg));
		}

		tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
		if (tc < 0)
			continue;

		if (cos2bw.tsa ==
		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
		} else {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
		}
	}
	hwrm_req_drop(bp, req);
	return 0;
}

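/* Remap TCs to hardware queues so that every TC in lltc_mask lands on a
 * lossless-capable queue and the remaining TCs fill the queues left over.
 * For example, if lltc_mask is 0x2 and queue 1 is the first lossless queue,
 * TC1 is pinned to queue 1 while TC0 and the higher TCs take queues 0, 2, ...
 * If the device is running it is closed and reopened, and any cached ETS
 * configuration is reapplied to the new mapping.
 */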
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
	unsigned long qmap = 0;
	int max = bp->max_tc;
	int i, j, rc;

	/* Assign lossless TCs first */
	for (i = 0, j = 0; i < max; ) {
		if (lltc_mask & (1 << i)) {
			if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
				bp->tc_to_qidx[i] = j;
				__set_bit(j, &qmap);
				i++;
			}
			j++;
			continue;
		}
		i++;
	}

	for (i = 0, j = 0; i < max; i++) {
		if (lltc_mask & (1 << i))
			continue;
		j = find_next_zero_bit(&qmap, max, j);
		bp->tc_to_qidx[i] = j;
		__set_bit(j, &qmap);
		j++;
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
		if (rc) {
			netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
			return rc;
		}
	}
	if (bp->ieee_ets) {
		int tc = netdev_get_num_tc(bp->dev);

		if (!tc)
			tc = 1;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
		if (rc) {
			netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
			return rc;
		}
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
		if (rc) {
			netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
			return rc;
		}
	}
	return 0;
}

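/* Translate the PFC priority enable bitmap into a TC mask using the cached
 * ETS prio_tc mapping, remap TCs onto lossless queues when necessary, and
 * program the enable bits with HWRM_QUEUE_PFCENABLE_CFG.
 */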
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input *req;
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_remap = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			u8 qidx = bp->tc_to_qidx[i];

			if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
				need_q_remap = true;
				break;
			}
		}
	}

	if (need_q_remap)
		bnxt_queue_remap(bp, tc_mask);

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(pri_mask);
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp;
	struct hwrm_queue_pfcenable_qcfg_input *req;
	u8 pri_mask;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	hwrm_req_drop(bp, req);
	return 0;
}

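/* Add or delete one APP TLV in the firmware DCBX APP table.  The current
 * table is read with HWRM_FW_GET_STRUCTURED_DATA into a DMA slice, edited
 * in place, and written back with HWRM_FW_SET_STRUCTURED_DATA.
 */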
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
				  bool add)
{
	struct hwrm_fw_set_structured_data_input *set;
	struct hwrm_fw_get_structured_data_input *get;
	struct hwrm_struct_data_dcbx_app *fw_app;
	struct hwrm_struct_hdr *data;
	dma_addr_t mapping;
	size_t data_len;
	int rc, n, i;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
	if (rc)
		return rc;

	hwrm_req_hold(bp, get);
	hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);

	n = IEEE_8021QAZ_MAX_TCS;
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
	data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
	if (!data) {
		rc = -ENOMEM;
		goto set_app_exit;
	}

	get->dest_data_addr = cpu_to_le64(mapping);
	get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
	get->count = 0;
	rc = hwrm_req_send(bp, get);
	if (rc)
		goto set_app_exit;

	fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

	if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
		rc = -ENODEV;
		goto set_app_exit;
	}

	n = data->count;
	for (i = 0; i < n; i++, fw_app++) {
		if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
		    fw_app->protocol_selector == app->selector &&
		    fw_app->priority == app->priority) {
			if (add)
				goto set_app_exit;
			else
				break;
		}
	}

	if (add) {
		/* append */
		n++;
		fw_app->protocol_id = cpu_to_be16(app->protocol);
		fw_app->protocol_selector = app->selector;
		fw_app->priority = app->priority;
		fw_app->valid = 1;
	} else {
		size_t len = 0;

		/* not found, nothing to delete */
		if (n == i)
			goto set_app_exit;

		len = (n - 1 - i) * sizeof(*fw_app);
		if (len)
			memmove(fw_app, fw_app + 1, len);
		n--;
		memset(fw_app + n, 0, sizeof(*fw_app));
	}
	data->count = n;
	data->len = cpu_to_le16(sizeof(*fw_app) * n);
	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

	rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
	if (rc)
		goto set_app_exit;

	set->src_data_addr = cpu_to_le64(mapping);
	set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
	set->hdr_cnt = 1;
	rc = hwrm_req_send(bp, set);

set_app_exit:
	hwrm_req_drop(bp, get); /* dropping get request and associated slice */
	return rc;
}

static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
	struct hwrm_queue_dscp_qcaps_output *resp;
	struct hwrm_queue_dscp_qcaps_input *req;
	int rc;

	bp->max_dscp_value = 0;
	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (!rc) {
		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
		if (bp->max_dscp_value < 0x3f)
			bp->max_dscp_value = 0;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

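/* Program one DSCP to priority mapping entry.  The entry is handed to
 * firmware through a DMA-mapped struct bnxt_dscp2pri_entry; a mask of 0x3f
 * adds the mapping and a mask of 0 removes it.
 */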
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
					bool add)
{
	struct hwrm_queue_dscp2pri_cfg_input *req;
	struct bnxt_dscp2pri_entry *dscp2pri;
	dma_addr_t mapping;
	int rc;

	if (bp->hwrm_spec_code < 0x10800)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
	if (rc)
		return rc;

	dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
	if (!dscp2pri) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->src_data_addr = cpu_to_le64(mapping);
	dscp2pri->dscp = app->protocol;
	if (add)
		dscp2pri->mask = 0x3f;
	else
		dscp2pri->mask = 0;
	dscp2pri->pri = app->priority;
	req->entry_cnt = cpu_to_le16(1);
	rc = hwrm_req_send(bp, req);
	return rc;
}

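/* Validate an ETS configuration from dcbnl: priorities must map to valid
 * TCs, the ETS bandwidth total cannot exceed 100%, and no ETS TC may be
 * starved with zero bandwidth while the rest consume the full 100%.  On
 * success *tc returns the number of TCs to enable.
 */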
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	bool zero = false;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			zero = zero || !ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100) {
		netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
		return -EINVAL;
	}
	if (zero && total_ets_bw == 100) {
		netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
		return -EINVAL;
	}

	if (max_tc >= bp->max_tc)
		*tc = bp->max_tc;
	else
		*tc = max_tc + 1;
	return 0;
}

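/* dcbnl hooks.  The get handlers query firmware on first use and cache the
 * result in bp->ieee_ets / bp->ieee_pfc when DCBX is firmware-managed; the
 * set handlers are only honored in host-managed IEEE mode.
 */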
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	int rc;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return -ENOMEM;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			goto error;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			goto error;

		/* cache result */
		bp->ieee_ets = my_ets;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;
error:
	kfree(my_ets);
	return rc;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}

static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = bp->port_stats.hw_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;
		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}

static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
	    (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}

static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
		if (!bp->max_dscp_value)
			return -ENOTSUPP;
		if (app->protocol > bp->max_dscp_value)
			return -EINVAL;
	}
	return 0;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);

	return rc;
}

static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);

	return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}

static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* All firmware DCBX settings are set in NVRAM */
	if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
			return 1;

		/* only support IEEE */
		if ((mode & DCB_CAP_DCBX_VER_CEE) ||
		    !(mode & DCB_CAP_DCBX_VER_IEEE))
			return 1;
	}

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}

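/* DCB netlink ops registered on the netdev by bnxt_dcb_init() */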
static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};

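/* Query DSCP capabilities, decide whether DCBX is host-managed or left to
 * the firmware LLDP/DCBX agent, and hook dcbnl_ops up to the netdev.
 */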
void bnxt_dcb_init(struct bnxt *bp)
{
	bp->dcbx_cap = 0;
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bnxt_hwrm_queue_dscp_qcaps(bp);
	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}

#endif