/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
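/* Map a hardware CoS queue ID back to the TC index currently assigned to
 * it, or return -EINVAL if no TC maps to that queue.
 */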
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
	int i, j;

	for (i = 0; i < bp->max_tc; i++) {
		if (bp->q_info[i].queue_id == queue_id) {
			for (j = 0; j < bp->max_tc; j++) {
				if (bp->tc_to_qidx[j] == i)
					return j;
			}
		}
	}
	return -EINVAL;
}
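/* Program the priority to CoS queue mapping derived from ets->prio_tc[]
 * into the firmware, for both traffic directions and inner VLAN priority.
 */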
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input req = {0};
	int rc = 0, i;
	u8 *pri2cos;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req.pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		u8 qidx;

		req.enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
		pri2cos[i] = bp->q_info[qidx].queue_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
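/* Query the current priority to CoS queue mapping from the firmware and
 * translate each queue ID back into a TC index in ets->prio_tc[].
 */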
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i, tc;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];

			tc = bnxt_queue_to_tc(bp, queue_id);
			if (tc >= 0)
				ets->prio_tc[i] = tc;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
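/* Configure per-CoS-queue scheduling (strict priority or ETS) and
 * bandwidth weights in the firmware for the first max_tc traffic classes.
 */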
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc = 0, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
	for (i = 0; i < max_tc; i++) {
		u8 qidx = bp->tc_to_qidx[i];

		req.enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
			qidx);

		memset(&cos2bw, 0, sizeof(cos2bw));
		cos2bw.queue_id = bp->q_info[qidx].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
			/* older firmware requires min_bw to be set to the
			 * same weight value in percent.
			 */
			cos2bw.min_bw =
				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
					    BW_VALUE_UNIT_PERCENT1_100);
		}
		data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
		if (qidx == 0) {
			req.queue_id0 = cos2bw.queue_id;
			req.unused_0 = 0;
		}
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
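/* Read back the per-queue TSA and bandwidth settings from the firmware
 * and fill in the matching tc_tsa[] and tc_tx_bw[] entries.
 */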
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_cos2bw_qcfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
		int tc;

		memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
		if (i == 0)
			cos2bw.queue_id = resp->queue_id0;

		tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
		if (tc < 0)
			continue;

		if (cos2bw.tsa ==
		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
		} else {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}
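/* Remap TCs to hardware queues so that every TC in lltc_mask lands on a
 * lossless queue.  If the NIC is running, it is restarted and any cached
 * ETS configuration is re-applied with the new mapping.
 */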
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
	unsigned long qmap = 0;
	int max = bp->max_tc;
	int i, j, rc;

	/* Assign lossless TCs first */
	for (i = 0, j = 0; i < max; ) {
		if (lltc_mask & (1 << i)) {
			if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
				bp->tc_to_qidx[i] = j;
				__set_bit(j, &qmap);
				i++;
			}
			j++;
			continue;
		}
		i++;
	}

	for (i = 0, j = 0; i < max; i++) {
		if (lltc_mask & (1 << i))
			continue;
		j = find_next_zero_bit(&qmap, max, j);
		bp->tc_to_qidx[i] = j;
		__set_bit(j, &qmap);
		j++;
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
		if (rc) {
			netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
			return rc;
		}
	}
	if (bp->ieee_ets) {
		int tc = netdev_get_num_tc(bp->dev);

		if (!tc)
			tc = 1;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
		if (rc) {
			netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
			return rc;
		}
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
		if (rc) {
			netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
			return rc;
		}
	}
	return 0;
}
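/* Enable PFC for the priorities set in pfc->pfc_en.  The affected TCs must
 * be backed by lossless queues; the queues are remapped first if they are
 * not.
 */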
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input req = {0};
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_remap = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			u8 qidx = bp->tc_to_qidx[i];

			if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
				need_q_remap = true;
				break;
			}
		}
	}

	if (need_q_remap)
		rc = bnxt_queue_remap(bp, tc_mask);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
	req.flags = cpu_to_le32(pri_mask);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	u8 pri_mask;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}
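/* Add or delete a DCBX APP TLV entry in the firmware's host-operational
 * table, using the structured data get/set firmware commands and a DMA
 * buffer shared with the firmware.
 */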
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
				  bool add)
{
	struct hwrm_fw_set_structured_data_input set = {0};
	struct hwrm_fw_get_structured_data_input get = {0};
	struct hwrm_struct_data_dcbx_app *fw_app;
	struct hwrm_struct_hdr *data;
	dma_addr_t mapping;
	size_t data_len;
	int rc, n, i;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	n = IEEE_8021QAZ_MAX_TCS;
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
				  GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
	get.dest_data_addr = cpu_to_le64(mapping);
	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
	get.count = 0;
	rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
	if (rc)
		goto set_app_exit;

	fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

	if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
		rc = -ENODEV;
		goto set_app_exit;
	}

	n = data->count;
	for (i = 0; i < n; i++, fw_app++) {
		if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
		    fw_app->protocol_selector == app->selector &&
		    fw_app->priority == app->priority) {
			if (add)
				goto set_app_exit;
			else
				break;
		}
	}

	if (add) {
		/* append the new entry */
		fw_app->protocol_id = cpu_to_be16(app->protocol);
		fw_app->protocol_selector = app->selector;
		fw_app->priority = app->priority;
		fw_app->valid = 1;
		n++;
	} else {
		size_t len = 0;

		/* not found, nothing to delete */
		if (n == i)
			goto set_app_exit;

		len = (n - 1 - i) * sizeof(*fw_app);
		if (len)
			memmove(fw_app, fw_app + 1, len);
		n--;
		memset(fw_app + n, 0, sizeof(*fw_app));
	}
	data->count = n;
	data->len = cpu_to_le16(sizeof(*fw_app) * n);
	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

	bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
	set.src_data_addr = cpu_to_le64(mapping);
	set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
	set.hdr_cnt = 1;
	rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);

set_app_exit:
	dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
	return rc;
}
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
	struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_dscp_qcaps_input req = {0};
	int rc;

	bp->max_dscp_value = 0;
	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
		if (bp->max_dscp_value < 0x3f)
			bp->max_dscp_value = 0;
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
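/* Add or delete a single DSCP to priority mapping via the
 * HWRM_QUEUE_DSCP2PRI_CFG command; the entry is passed to the firmware in
 * a DMA buffer.
 */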
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
					bool add)
{
	struct hwrm_queue_dscp2pri_cfg_input req = {0};
	struct bnxt_dscp2pri_entry *dscp2pri;
	dma_addr_t mapping;
	int rc;

	if (bp->hwrm_spec_code < 0x10800)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
	dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
				      &mapping, GFP_KERNEL);
	if (!dscp2pri)
		return -ENOMEM;

	req.src_data_addr = cpu_to_le64(mapping);
	dscp2pri->dscp = app->protocol;
	if (add)
		dscp2pri->mask = 0x3f;
	else
		dscp2pri->mask = 0;
	dscp2pri->pri = app->priority;
	req.entry_cnt = cpu_to_le16(1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
			  mapping);
	return rc;
}
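/* Sanity check an ETS configuration from dcbnl and return the number of
 * traffic classes to be configured in *tc.
 */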
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100)
		return -EINVAL;

	if (max_tc >= bp->max_tc)
		*tc = bp->max_tc;
	else
		*tc = max_tc + 1;
	return 0;
}
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	int rc;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return -ENOMEM;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			goto error;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			goto error;

		/* cache the firmware configuration for later calls */
		bp->ieee_ets = my_ets;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;

error:
	kfree(my_ets);
	return rc;
}
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;
		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}
static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}
static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
		if (!bp->max_dscp_value)
			return -ENOTSUPP;
		if (app->protocol > bp->max_dscp_value)
			return -EINVAL;
	}
	return 0;
}
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);

	return rc;
}
static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);

	return rc;
}
static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}
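/* dcbnl setdcbx: only host-based IEEE DCBX can be selected, and only when
 * the port is not managed by the firmware LLDP/DCBX agent.
 */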
static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* All firmware DCBX settings are set in NVRAM */
	if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
			return 1;

		/* only support IEEE */
		if ((mode & DCB_CAP_DCBX_VER_CEE) ||
		    !(mode & DCB_CAP_DCBX_VER_IEEE))
			return 1;
	}

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};
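/* Register the DCB netlink ops and pick the DCBX mode: host-based IEEE
 * DCBX on a PF without a firmware LLDP agent, firmware (LLD) managed when
 * the firmware DCBX agent is running.
 */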
void bnxt_dcb_init(struct bnxt *bp)
{
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bnxt_hwrm_queue_dscp_qcaps(bp);
	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}
void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}
#endif