// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_main.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20
/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, qset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max tm rate available for configuration
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
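/* Worked example (illustrative, not from the original source): at the
 * priority level the tick below is 6 * 256 = 1536, so the default
 * IR_b = 126 with IR_u = IR_s = 0 gives (126 * 1 * 8 / 1536) * 1000,
 * i.e. roughly 656 Mbps; ir_u and ir_s are then adjusted in the loops
 * below to reach the requested rate.
 */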
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};

	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||

	tick = tick_array[shaper_level];
	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;

		/* Increasing the numerator to select ir_u value */
		while (ir_calc < ir) {
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;

			ir_para->ir_b = DEFAULT_SHAPER_IR_B;

			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)

		offset = hclge_pfc_tx_stats_offset;

		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)

	hclge_pfc_stats_get(hdev, false, stats);

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)

	hclge_pfc_stats_get(hdev, true, stats);

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);

int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,

	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)

	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)

	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
	/* the register for priority has four bytes, the first byte includes
	 * priority 0 and priority 1, the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
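	/* Illustrative example (not from the original source): if priority 0
	 * maps to TC 0 and priority 1 maps to TC 1, the statement below
	 * leaves the low nibble of pri[0] as 0 and sets its high nibble to 1,
	 * i.e. pri[0] ends up as 0x10.
	 */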
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

int hclge_up_to_tc_map(struct hclge_dev *hdev)

	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)

	hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
	hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
	for (i = 0; i < HNAE3_MAX_DSCP; i++)
		hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;

int hclge_dscp_to_tc_map(struct hclge_dev *hdev)

	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 pri_id, tc_id, i, j;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
	/* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
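	/* Illustrative note (not from the original source), assuming
	 * HCLGE_DSCP_TC_SHIFT() selects the low or high nibble from bit 0 of
	 * the index: DSCP 0 lands in the low nibble of req0[0], DSCP 1 in the
	 * high nibble of req0[0], DSCP 32 in the low nibble of req1[0], and
	 * so on.
	 */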
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		/* Each dscp setting has 4 bits, so each byte saves two dscp
		 * settings
		 */
		req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)

	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,

	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,

	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   |  15  | 14 ~ 10 |  9 ~ 0  |
	 *
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
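	/* Illustrative example (not from the original source), following the
	 * layout above: software qs_id 0x0405 (qs_id_h = 1, qs_id_l = 5) is
	 * repacked as 0x0805, and OR'ing in the link-valid bit (bit 10)
	 * below yields 0x0c05 as the hardware qset_id.
	 */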
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,

	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,

	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,

	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,

	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,

	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,

	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,

	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,

	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)

	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)

	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     hdev->ae_dev->dev_specs.max_tm_rate);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)

	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,

	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;

		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     hdev->ae_dev->dev_specs.max_tm_rate);

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);

			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)

		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;

	/* TC configuration is shared by the PF/VFs on one port; for
	 * simplicity only one TC is allowed per VF. A VF's vport_id is
	 * non-zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;

		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;	/* 100 percent as init */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* when mqprio is enabled, tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;

			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;

			tc_sch_mode = HCLGE_SCH_MODE_SP;

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)

#define BW_PERCENT	100
#define DEFAULT_BW_WEIGHT	1

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
			hdev->ae_dev->dev_specs.max_tm_rate;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;

static void hclge_update_fc_mode(struct hclge_dev *hdev)

	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);

		hclge_update_fc_mode_by_dcb_flag(hdev);

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)

	for (i = 0; i < hdev->tm_info.num_pg; i++) {

		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)

	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)

	for (i = 0; i < hdev->tm_info.num_pg; i++) {

		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);

static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,

static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)

	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
					     &ir_para, max_tm_rate);

		shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
							   HCLGE_SHAPER_BS_U_DEF,
							   HCLGE_SHAPER_BS_S_DEF);
		shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
							   HCLGE_SHAPER_BS_U_DEF,
							   HCLGE_SHAPER_BS_S_DEF);

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)

	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     hdev->ae_dev->dev_specs.max_tm_rate);

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);

		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;

	for (i = 0; i < hdev->tc_max; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

			if (i >= kinfo->tc_info.max_tc)

			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)

#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;

	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);

		if (!hnae3_dev_dcb_supported(hdev))

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",

		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);

static int hclge_tm_map_cfg(struct hclge_dev *hdev)

	ret = hclge_up_to_tc_map(hdev);

	if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
		ret = hclge_dscp_to_tc_map(hdev);

	ret = hclge_tm_pg_to_pri_map(hdev);

	return hclge_tm_pri_q_qs_cfg(hdev);

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)

	ret = hclge_tm_port_shaper_cfg(hdev);

	ret = hclge_tm_pg_shaper_cfg(hdev);

	return hclge_tm_pri_shaper_cfg(hdev);

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)

	ret = hclge_tm_pg_dwrr_cfg(hdev);

	return hclge_tm_pri_dwrr_cfg(hdev);

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);

static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)

	struct hclge_vport *vport = hdev->vport;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;

		if (pri_id >= kinfo->tc_info.max_tc)

		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :

		ret = hclge_tm_qs_schd_mode_cfg(hdev,
						vport[i].qs_offset + pri_id,

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)

	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;

	if (vport->vport_id >= HNAE3_MAX_TC)

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tc_max; i++) {
			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);

		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);

	return hclge_tm_lvl34_schd_mode_cfg(hdev);

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);

	ret = hclge_tm_shaper_cfg(hdev);

	ret = hclge_tm_dwrr_cfg(hdev);

	/* Cfg schd mode for each level schd */
	ret = hclge_tm_schd_mode_hw(hdev);

	return hclge_tm_flush_cfg(hdev, false);

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)

	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)

	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);

/* The queues used for backpressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
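/* Illustrative example (not from the original source): with 32 qsets per
 * group, qset 37 falls into group 1 and is tracked by bit 5 of that
 * group's qs_bit_map.
 */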
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)

	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;

	for (i = 0; i < grp_num; i++) {

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);

				qs_bitmap |= (1 << sub_grp);

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);

int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)

	switch (hdev->tm_info.fc_mode) {

	case HCLGE_FC_RX_PAUSE:

	case HCLGE_FC_TX_PAUSE:

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);

static int hclge_tm_bp_setup(struct hclge_dev *hdev)

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)

	ret = hclge_pause_param_setup_hw(hdev);

	ret = hclge_mac_pause_setup_hw(hdev);

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE Mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");

		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",

	return hclge_tm_bp_setup(hdev);

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)

	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)

		hdev->tm_info.num_tc = 1;

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))

	ret = hclge_tm_schd_setup_hw(hdev);

	ret = hclge_pause_setup_hw(hdev, init);

int hclge_tm_schd_init(struct hclge_dev *hdev)

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)

	hclge_tm_schd_info_init(hdev);
	hclge_dscp_to_prio_map_init(hdev);

	return hclge_tm_init_hw(hdev, true);

int hclge_tm_vport_map_update(struct hclge_dev *hdev)

	struct hclge_vport *vport = hdev->vport;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)

	return hclge_tm_bp_setup(hdev);

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)

	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)

	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,

	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);

	*priority = map->priority;
	*link_vld = map->link_vld;

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)

	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);

	*mode = qs_sch_mode->sch_mode;

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)

	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);

	*weight = qs_weight->dwrr;

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)

	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)

	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);

	*mode = pri_sch_mode->sch_mode;

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)

	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);

	*weight = priority_weight->dwrr;

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)

	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)

	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);

	*qset_id = le16_to_cpu(map->qset_id);
	/* convert qset_id to the following format, drop the vld bit
	 *          | qs_id_h | vld | qs_id_l |
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *
	 * qset_id: |  15  | 14 ~ 10 |  9 ~ 0  |
	 */
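	/* Note (not from the original source): this is the inverse of the
	 * repacking done in hclge_tm_q_to_qs_map_cfg(), recovering the
	 * software qs_id layout from the hardware qset_id.
	 */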
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);

	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,

	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)

#define HCLGE_TM_TC_MASK	0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,

	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);

	*pri_bit_map = map->pri_bit_map;

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)

	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);

	*weight = pg_weight_cmd->dwrr;

int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);

	*mode = (u8)le32_to_cpu(desc.data[1]);

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)

	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)

	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)

	struct hclge_desc desc;

	if (!hnae3_ae_dev_tm_flush_supported(hdev))

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);

	desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

		dev_err(&hdev->pdev->dev,
			"failed to config tm flush, ret = %d\n", ret);

		msleep(HCLGE_TM_FLUSH_TIME_MS);

void hclge_reset_tc_config(struct hclge_dev *hdev)

	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_knic_private_info *kinfo;

	kinfo = &vport->nic.kinfo;

	if (!kinfo->tc_info.mqprio_destroy)

	/* clear tc info, including mqprio_destroy and mqprio_active */
	memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
	hclge_tm_schd_info_update(hdev, 0);
	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);