/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
        int i, j;

        for (i = 0; i < bp->max_tc; i++) {
                if (bp->q_info[i].queue_id == queue_id) {
                        for (j = 0; j < bp->max_tc; j++) {
                                if (bp->tc_to_qidx[j] == i)
                                        return j;
                        }
                }
        }
        return -EINVAL;
}
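
/* Program the firmware's priority-to-CoS-queue mapping (TX and RX paths)
 * from the IEEE ETS prio_tc table, going through the current TC-to-queue
 * index map.
 */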
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_pri2cos_cfg_input req = {0};
        u8 *pri2cos;
        int rc, i;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
                                QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

        pri2cos = &req.pri0_cos_queue_id;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                u8 qidx;

                req.enables |= cpu_to_le32(
                        QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

                qidx = bp->tc_to_qidx[ets->prio_tc[i]];
                pri2cos[i] = bp->q_info[qidx].queue_id;
        }
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}
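
/* Query the current priority-to-CoS-queue mapping from firmware and
 * translate each CoS queue ID back into a TC index for the ETS table.
 */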
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_pri2cos_qcfg_input req = {0};
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                u8 *pri2cos = &resp->pri0_cos_queue_id;
                int i;

                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                        u8 queue_id = pri2cos[i];
                        int tc;

                        tc = bnxt_queue_to_tc(bp, queue_id);
                        if (tc >= 0)
                                ets->prio_tc[i] = tc;
                }
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
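
/* Program the per-CoS-queue scheduler: strict priority for TCs marked
 * IEEE_8021QAZ_TSA_STRICT, ETS with the requested bandwidth weight
 * otherwise.
 */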
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
                                      u8 max_tc)
{
        struct hwrm_queue_cos2bw_cfg_input req = {0};
        struct bnxt_cos2bw_cfg cos2bw;
        void *data;
        int rc, i;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
        for (i = 0; i < max_tc; i++) {
                u8 qidx = bp->tc_to_qidx[i];

                req.enables |= cpu_to_le32(
                        QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
                        qidx);

                memset(&cos2bw, 0, sizeof(cos2bw));
                cos2bw.queue_id = bp->q_info[qidx].queue_id;
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
                        cos2bw.tsa =
                                QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
                        cos2bw.pri_lvl = i;
                } else {
                        cos2bw.tsa =
                                QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
                        cos2bw.bw_weight = ets->tc_tx_bw[i];
                        /* older firmware requires min_bw to be set to the
                         * same weight value in percent.
                         */
                        cos2bw.min_bw =
                                cpu_to_le32((ets->tc_tx_bw[i] * 100) |
                                            BW_VALUE_UNIT_PERCENT1_100);
                }
                data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
                memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
                if (qidx == 0) {
                        req.queue_id0 = cos2bw.queue_id;
                        req.unused_0 = 0;
                }
        }
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}
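
/* Read back the per-CoS-queue scheduler configuration and report it as
 * IEEE ETS TSA/bandwidth settings, indexed by TC.
 */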
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
        struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_cos2bw_qcfg_input req = {0};
        struct bnxt_cos2bw_cfg cos2bw;
        void *data;
        int rc, i;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }

        data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
        for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
                int tc;

                memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
                if (i == 0)
                        cos2bw.queue_id = resp->queue_id0;

                tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
                if (tc < 0)
                        continue;

                if (cos2bw.tsa ==
                    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
                        ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
                } else {
                        ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
                        ets->tc_tx_bw[tc] = cos2bw.bw_weight;
                }
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}
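
/* Remap TCs to hardware queues so that the TCs in lltc_mask land on
 * lossless-capable queues.  If the NIC is running it is reopened and the
 * cached ETS settings are reapplied with the new mapping.
 */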
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
        unsigned long qmap = 0;
        int max = bp->max_tc;
        int i, j, rc;

        /* Assign lossless TCs first */
        for (i = 0, j = 0; i < max; ) {
                if (lltc_mask & (1 << i)) {
                        if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
                                bp->tc_to_qidx[i] = j;
                                __set_bit(j, &qmap);
                                i++;
                        }
                        j++;
                        continue;
                }
                i++;
        }

        for (i = 0, j = 0; i < max; i++) {
                if (lltc_mask & (1 << i))
                        continue;
                j = find_next_zero_bit(&qmap, max, j);
                bp->tc_to_qidx[i] = j;
                __set_bit(j, &qmap);
                j++;
        }

        if (netif_running(bp->dev)) {
                bnxt_close_nic(bp, false, false);
                rc = bnxt_open_nic(bp, false, false);
                if (rc) {
                        netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
                        return rc;
                }
        }
        if (bp->ieee_ets) {
                int tc = netdev_get_num_tc(bp->dev);

                if (!tc)
                        tc = 1;
                rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
                if (rc) {
                        netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
                        return rc;
                }
                rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
                if (rc) {
                        netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
                        return rc;
                }
        }
        return 0;
}
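
/* Enable PFC for the requested priorities.  If any TC carrying a
 * PFC-enabled priority is currently mapped to a lossy queue, remap TCs to
 * lossless queues first.
 */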
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
        struct hwrm_queue_pfcenable_cfg_input req = {0};
        struct ieee_ets *my_ets = bp->ieee_ets;
        unsigned int tc_mask = 0, pri_mask = 0;
        u8 i, pri, lltc_count = 0;
        bool need_q_remap = false;
        int rc;

        if (!my_ets)
                return -EINVAL;

        for (i = 0; i < bp->max_tc; i++) {
                for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
                        if ((pfc->pfc_en & (1 << pri)) &&
                            (my_ets->prio_tc[pri] == i)) {
                                pri_mask |= 1 << pri;
                                tc_mask |= 1 << i;
                        }
                }
                if (tc_mask & (1 << i))
                        lltc_count++;
        }
        if (lltc_count > bp->max_lltc)
                return -EINVAL;

        for (i = 0; i < bp->max_tc; i++) {
                if (tc_mask & (1 << i)) {
                        u8 qidx = bp->tc_to_qidx[i];

                        if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
                                need_q_remap = true;
                                break;
                        }
                }
        }

        if (need_q_remap)
                rc = bnxt_queue_remap(bp, tc_mask);

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
        req.flags = cpu_to_le32(pri_mask);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}
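
/* Query the firmware for the currently enabled PFC priority mask. */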
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
        struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_pfcenable_qcfg_input req = {0};
        u8 pri_mask;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }

        pri_mask = le32_to_cpu(resp->flags);
        pfc->pfc_en = pri_mask;
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}
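
/* Add or delete an entry in the firmware's DCBX APP table.  The current
 * table is read with HWRM_FW_GET_STRUCTURED_DATA into a DMA buffer,
 * modified in place, and written back with HWRM_FW_SET_STRUCTURED_DATA.
 */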
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
                                  bool add)
{
        struct hwrm_fw_set_structured_data_input set = {0};
        struct hwrm_fw_get_structured_data_input get = {0};
        struct hwrm_struct_data_dcbx_app *fw_app;
        struct hwrm_struct_hdr *data;
        dma_addr_t mapping;
        size_t data_len;
        int rc, n, i;

        if (bp->hwrm_spec_code < 0x10601)
                return 0;

        n = IEEE_8021QAZ_MAX_TCS;
        data_len = sizeof(*data) + sizeof(*fw_app) * n;
        data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
                                   GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
        get.dest_data_addr = cpu_to_le64(mapping);
        get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
        get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
        rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
        if (rc)
                goto set_app_exit;

        fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

        if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
                rc = -ENODEV;
                goto set_app_exit;
        }

        n = data->count;
        for (i = 0; i < n; i++, fw_app++) {
                if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
                    fw_app->protocol_selector == app->selector &&
                    fw_app->priority == app->priority) {
                        if (add)
                                goto set_app_exit;
                        else
                                break;
                }
        }
        if (add) {
                /* append */
                n++;
                fw_app->protocol_id = cpu_to_be16(app->protocol);
                fw_app->protocol_selector = app->selector;
                fw_app->priority = app->priority;
                fw_app->valid = 1;
        } else {
                size_t len = 0;

                /* not found, nothing to delete */
                if (n == i)
                        goto set_app_exit;

                len = (n - 1 - i) * sizeof(*fw_app);
                if (len)
                        memmove(fw_app, fw_app + 1, len);
                n--;
                memset(fw_app + n, 0, sizeof(*fw_app));
        }
        data->len = cpu_to_le16(sizeof(*fw_app) * n);
        data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

        bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
        set.src_data_addr = cpu_to_le64(mapping);
        set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
        set.hdr_cnt = 1;
        rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
        if (rc)
                rc = -EIO;

set_app_exit:
        dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
        return rc;
}
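
/* Query how many DSCP bits the firmware supports.  DSCP remapping is only
 * offered when the full 6-bit DSCP range is available; otherwise
 * max_dscp_value is cleared to disable it.
 */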
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
        struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_queue_dscp_qcaps_input req = {0};
        int rc;

        if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
                if (bp->max_dscp_value < 0x3f)
                        bp->max_dscp_value = 0;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
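
/* Add or remove a single DSCP-to-priority mapping entry.  The entry is
 * passed to firmware through a DMA-mapped struct bnxt_dscp2pri_entry.
 */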
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
                                        bool add)
{
        struct hwrm_queue_dscp2pri_cfg_input req = {0};
        struct bnxt_dscp2pri_entry *dscp2pri;
        dma_addr_t mapping;
        int rc;

        if (bp->hwrm_spec_code < 0x10800)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
        dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
                                      &mapping, GFP_KERNEL);
        if (!dscp2pri)
                return -ENOMEM;

        req.src_data_addr = cpu_to_le64(mapping);
        dscp2pri->dscp = app->protocol;
        if (add)
                dscp2pri->mask = 0x3f;
        else
                dscp2pri->mask = 0;
        dscp2pri->pri = app->priority;
        req.entry_cnt = cpu_to_le16(1);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                rc = -EIO;

        dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
                          mapping);
        return rc;
}
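
/* Validate the IEEE ETS parameters against device limits and return the
 * number of TCs to configure via *tc.
 */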
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
        int total_ets_bw = 0;
        u8 max_tc = 0;
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (ets->prio_tc[i] > bp->max_tc) {
                        netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
                                   ets->prio_tc[i]);
                        return -EINVAL;
                }
                if (ets->prio_tc[i] > max_tc)
                        max_tc = ets->prio_tc[i];

                if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
                        return -EINVAL;

                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        total_ets_bw += ets->tc_tx_bw[i];
                        break;
                default:
                        return -ENOTSUPP;
                }
        }
        if (total_ets_bw > 100)
                return -EINVAL;

        *tc = max_tc + 1;
        return 0;
}

static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;

        ets->ets_cap = bp->max_tc;

        if (!my_ets) {
                int rc;

                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;

                my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                if (!my_ets)
                        return 0;
                rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
                if (rc)
                        return 0;
                rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
                if (rc)
                        return 0;

                /* cache result */
                bp->ieee_ets = my_ets;
        }

        ets->cbs = my_ets->cbs;
        memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
        memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
        memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
        memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
        return 0;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;
        u8 max_tc = 0;
        int rc, i;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        rc = bnxt_ets_validate(bp, ets, &max_tc);
        if (!rc) {
                if (!my_ets) {
                        my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                        if (!my_ets)
                                return -ENOMEM;
                        /* initialize PRI2TC mappings to invalid value */
                        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                                my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
                        bp->ieee_ets = my_ets;
                }
                rc = bnxt_setup_mq_tc(dev, max_tc);
                if (rc)
                        return rc;
                rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
                if (rc)
                        return rc;
                rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
                if (rc)
                        return rc;
                memcpy(my_ets, ets, sizeof(*my_ets));
        }
        return rc;
}

static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
        struct bnxt *bp = netdev_priv(dev);
        __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
        struct ieee_pfc *my_pfc = bp->ieee_pfc;
        long rx_off, tx_off;
        int i, rc;

        pfc->pfc_cap = bp->max_lltc;

        if (!my_pfc) {
                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;

                my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
                if (!my_pfc)
                        return 0;
                bp->ieee_pfc = my_pfc;
                rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
                if (rc)
                        return 0;
        }

        pfc->pfc_en = my_pfc->pfc_en;
        pfc->mbc = my_pfc->mbc;
        pfc->delay = my_pfc->delay;

        if (!stats)
                return 0;

        rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
        tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
                pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
                pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
        }

        return 0;
}

static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_pfc *my_pfc = bp->ieee_pfc;
        int rc;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        if (!my_pfc) {
                my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
                if (!my_pfc)
                        return -ENOMEM;
                bp->ieee_pfc = my_pfc;
        }
        rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
        if (!rc)
                memcpy(my_pfc, pfc, sizeof(*my_pfc));

        return rc;
}

static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
        if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
                if (!bp->max_dscp_value)
                        return -ENOTSUPP;
                if (app->protocol > bp->max_dscp_value)
                        return -EINVAL;
        }
        return 0;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
        if (rc)
                return rc;

        rc = dcb_ieee_setapp(dev, app);
        if (rc)
                return rc;

        if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
             app->protocol == ETH_P_IBOE) ||
            (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
             app->protocol == ROCE_V2_UDP_DPORT))
                rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

        if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
                rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);

        return rc;
}

static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
                return -EINVAL;

        rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
        if (rc)
                return rc;

        rc = dcb_ieee_delapp(dev, app);
        if (rc)
                return rc;

        if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
             app->protocol == ETH_P_IBOE) ||
            (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
             app->protocol == ROCE_V2_UDP_DPORT))
                rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

        if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
                rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);

        return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);

        return bp->dcbx_cap;
}

static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
        struct bnxt *bp = netdev_priv(dev);

        /* All firmware DCBX settings are set in NVRAM */
        if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
                return 1;

        if (mode & DCB_CAP_DCBX_HOST) {
                if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
                        return 1;

                /* only support IEEE */
                if ((mode & DCB_CAP_DCBX_VER_CEE) ||
                    !(mode & DCB_CAP_DCBX_VER_IEEE))
                        return 1;
        }

        if (mode == bp->dcbx_cap)
                return 0;

        bp->dcbx_cap = mode;
        return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
        .ieee_getets = bnxt_dcbnl_ieee_getets,
        .ieee_setets = bnxt_dcbnl_ieee_setets,
        .ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
        .ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
        .ieee_setapp = bnxt_dcbnl_ieee_setapp,
        .ieee_delapp = bnxt_dcbnl_ieee_delapp,
        .getdcbx = bnxt_dcbnl_getdcbx,
        .setdcbx = bnxt_dcbnl_setdcbx,
};
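
/* Register DCBNL support when firmware is new enough.  The host only takes
 * control of DCBX (DCB_CAP_DCBX_HOST) on a PF with no firmware LLDP agent;
 * otherwise DCBX is reported as LLD-managed.
 */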
void bnxt_dcb_init(struct bnxt *bp)
{
        if (bp->hwrm_spec_code < 0x10501)
                return;

        bnxt_hwrm_queue_dscp_qcaps(bp);
        bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
        if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
                bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
        else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
                bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
        bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
        kfree(bp->ieee_pfc);
        kfree(bp->ieee_ets);
        bp->ieee_pfc = NULL;
        bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}
#endif