// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"

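/* Translate the ETS configuration handed down by dcbnl into the
 * driver's internal traffic-manager (TM) state: strict-priority TCs
 * use SP scheduling with a zero DWRR weight, while ETS TCs use DWRR
 * with the requested bandwidth percentage as their weight.
 */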
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms; if we receive some other value
			 * from dcbnl, then throw an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

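/* Inverse of the conversion above: export the current TM state as an
 * IEEE ETS structure (capability, prio-to-TC map, per-TC bandwidth
 * and scheduling algorithm) for dcbnl queries.
 */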
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		if (i < hdev->tm_info.num_tc)
			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
		else
			ets->tc_tx_bw[i] = 0;

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

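/* Compare the new prio-to-TC map against the current one, flagging
 * *changed if any priority moved, and derive the TC count from the
 * highest referenced TC id.
 */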
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* return max tc number, i.e. the max tc id plus 1 */
	return max_tc_id + 1;
}

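/* Check the per-TC scheduling algorithm requested via dcbnl: ETS TCs
 * must be enabled and carry a non-zero bandwidth, and the bandwidths
 * of all ETS TCs must sum to exactly BW_PERCENT.
 */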
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed,
				       u8 tc_num)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (i >= tc_num) {
				dev_err(&hdev->pdev->dev,
					"tc%u is disabled, cannot set ets bw\n",
					i);
				return -EINVAL;
			}

			/* The hardware will switch to sp mode if bandwidth is
			 * 0, so the ets bandwidth must be greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

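/* Full validation of an ieee_ets request: derive the TC count, run
 * the common checks and the scheduling-mode checks, and report
 * whether the configuration differs from what is currently in use.
 */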
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

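/* Reprogram the hardware after the TC/queue mapping has changed:
 * scheduler, pause/PFC and packet buffers, then reinitialize RSS
 * over the new queue layout.
 */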
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);

	return hclge_rss_init_hw(hdev);
}

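/* Quiesce the NIC client before a reconfiguration: bring it down,
 * enable the TM flush, then uninitialize the client. The counterpart
 * below re-initializes the client, disables the flush and brings the
 * interface back up.
 */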
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

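/* dcbnl ieee_setets hook: validate the request, then either apply the
 * full TM configuration around a client down/up cycle (when the
 * prio-to-TC map changed) or just refresh the DWRR weights.
 */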
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    h->kinfo.tc_info.mqprio_active)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		return hclge_notify_init_up(hdev);
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

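/* dcbnl ieee_getpfc hook: report the PFC capability and enable bits
 * together with per-priority pause request/indication counters, which
 * requires a MAC statistics refresh first.
 */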
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_mac_update_stats(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update MAC stats, ret = %d.\n", ret);
		return ret;
	}

	hclge_pfc_tx_stats_get(hdev, pfc->requests);
	hclge_pfc_rx_stats_get(hdev, pfc->indications);

	return 0;
}

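/* dcbnl ieee_setpfc hook: fold the per-priority PFC enable bits into
 * a per-TC map (a TC is PFC-enabled if any priority mapped to it is
 * enabled), then reprogram pause settings and buffers with the TM
 * flush held across the client down/up transition.
 */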
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int last_bad_ret = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	/* Whether or not the following operations succeed, disabling
	 * the tm flush and notifying the network status up are
	 * necessary. Do not return immediately.
	 */
	ret = hclge_buffer_alloc(hdev);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		last_bad_ret = ret;

	return last_bad_ret;
}

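/* dcbnl ieee_setapp hook, accepting only DSCP APP entries: record the
 * new DSCP-to-priority mapping, switch the TC map mode to DSCP and
 * undo everything if programming the hardware fails.
 */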
static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	struct dcb_app old_app;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO)
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	if (app->priority == h->kinfo.dscp_prio[app->protocol])
		return 0;

	ret = dcb_ieee_setapp(netdev, app);
	if (ret)
		return ret;

	old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	old_app.protocol = app->protocol;
	old_app.priority = h->kinfo.dscp_prio[app->protocol];

	h->kinfo.dscp_prio[app->protocol] = app->priority;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = old_app.priority;
		(void)dcb_ieee_delapp(netdev, app);
		return ret;
	}

	vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
	if (old_app.priority == HNAE3_PRIO_ID_INVALID)
		h->kinfo.dscp_app_cnt++;
	else
		ret = dcb_ieee_delapp(netdev, &old_app);

	return ret;
}

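/* dcbnl ieee_delapp hook: remove a DSCP APP entry; once the last one
 * is gone, fall back to priority-based TC mapping.
 */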
static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO ||
	    app->priority != h->kinfo.dscp_prio[app->protocol])
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		return ret;

	h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to del dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = app->priority;
		(void)dcb_ieee_setapp(netdev, app);
		return ret;
	}

	if (h->kinfo.dscp_app_cnt)
		h->kinfo.dscp_app_cnt--;

	if (!h->kinfo.dscp_app_cnt) {
		vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
		ret = hclge_up_to_tc_map(hdev);
	}

	return ret;
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (h->kinfo.tc_info.mqprio_active)
		return 0;

	return hdev->dcbx_cap;
}

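/* dcbnl setdcbx hook; per the dcbnl convention, a non-zero return
 * value tells the caller the requested mode was rejected.
 */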
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

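/* Validate a tc-mqprio offload request: per-TC queue counts must be
 * powers of two and within the PF RSS limit, queue offsets must be
 * contiguous starting at 0, rate limiting is not supported, and the
 * total queue count must fit into the allocated TQPs.
 */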
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offset must start from 0, and being continuous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}

	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be less than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

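/* Copy the accepted mqprio parameters into the driver's tc_info. */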
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));
}

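/* Apply a new TC count and prio-to-TC map, then reprogram the
 * hardware mappings.
 */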
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* If the client is unregistered, changing the mqprio
	 * configuration is not allowed, since it may cause the ring
	 * uninit to fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	kinfo = &vport->nic.kinfo;
	if (kinfo->tc_info.dcb_ets_active)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	kinfo->tc_info.mqprio_destroy = !tc;

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, will active after reset, ret = %d\n",
			 ret);
	} else {
		/* roll-back */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.ieee_setapp	= hclge_ieee_setapp,
	.ieee_delapp	= hclge_ieee_delapp,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not a PF,
	 * then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}