drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h

/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_TM_H
#define __HCLGE_TM_H

#include <linux/types.h>

#include "hnae3.h"

struct hclge_dev;
struct hclge_vport;
enum hclge_opcode_type;

/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK	BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK	BIT(1)

#define HCLGE_TM_PORT_BASE_MODE_MSK	BIT(0)

#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0x7F
#define HCLGE_DEFAULT_PAUSE_TRANS_TIME	0xFFFF

/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK	BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK		0xFE

#define HCLGE_ETHER_MAX_RATE	100000

#define HCLGE_TM_PF_MAX_PRI_NUM		8
#define HCLGE_TM_PF_MAX_QSET_NUM	8

#define HCLGE_DSCP_MAP_TC_BD_NUM	2
#define HCLGE_DSCP_TC_SHIFT(n)		(((n) & 1) * 4)

#define HCLGE_TM_FLUSH_TIME_MS	10
#define HCLGE_TM_FLUSH_EN_MSK	BIT(0)

struct hclge_pg_to_pri_link_cmd {
	u8 pg_id;
	u8 rsvd1[3];
	u8 pri_bit_map;
};

struct hclge_qs_to_pri_link_cmd {
	__le16 qs_id;
	__le16 rsvd;
	u8 priority;
#define HCLGE_TM_QS_PRI_LINK_VLD_MSK	BIT(0)
	u8 link_vld;
};

struct hclge_nq_to_qs_link_cmd {
	__le16 nq_id;
	__le16 rsvd;
#define HCLGE_TM_Q_QS_LINK_VLD_MSK	BIT(10)
#define HCLGE_TM_QS_ID_L_MSK		GENMASK(9, 0)
#define HCLGE_TM_QS_ID_L_S		0
#define HCLGE_TM_QS_ID_H_MSK		GENMASK(14, 10)
#define HCLGE_TM_QS_ID_H_S		10
#define HCLGE_TM_QS_ID_H_EXT_S		11
#define HCLGE_TM_QS_ID_H_EXT_MSK	GENMASK(15, 11)
	__le16 qset_id;
};
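
/*
 * Note added for illustration, not part of the original header: judging by
 * the masks above, the qset_id field packs the low bits of the qset index
 * into bits 9:0, uses bit 10 as the queue-to-qset link valid flag, and moves
 * the high bits of larger qset indexes up into bits 15:11, roughly:
 *
 *	u16 val, lo, hi;
 *
 *	lo = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S);
 *	hi = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S);
 *	val = lo | (hi << HCLGE_TM_QS_ID_H_EXT_S) | HCLGE_TM_Q_QS_LINK_VLD_MSK;
 *	map->qset_id = cpu_to_le16(val);
 *
 * where qs_id and map are hypothetical locals used only for this sketch.
 */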

struct hclge_tqp_tx_queue_tc_cmd {
	__le16 queue_id;
	__le16 rsvd;
	u8 tc_id;
	u8 rev[3];
};

struct hclge_pg_weight_cmd {
	u8 pg_id;
	u8 dwrr;
};

struct hclge_priority_weight_cmd {
	u8 pri_id;
	u8 dwrr;
};

struct hclge_pri_sch_mode_cfg_cmd {
	u8 pri_id;
	u8 rsvd[3];
	u8 sch_mode;
};

struct hclge_qs_sch_mode_cfg_cmd {
	__le16 qs_id;
	u8 rsvd[2];
	u8 sch_mode;
};

struct hclge_qs_weight_cmd {
	__le16 qs_id;
	u8 dwrr;
};

struct hclge_ets_tc_weight_cmd {
	u8 tc_weight[HNAE3_MAX_TC];
	u8 weight_offset;
	u8 rsvd[15];
};

#define HCLGE_TM_SHAP_IR_B_MSK	GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH	0
#define HCLGE_TM_SHAP_IR_U_MSK	GENMASK(11, 8)
#define HCLGE_TM_SHAP_IR_U_LSH	8
#define HCLGE_TM_SHAP_IR_S_MSK	GENMASK(15, 12)
#define HCLGE_TM_SHAP_IR_S_LSH	12
#define HCLGE_TM_SHAP_BS_B_MSK	GENMASK(20, 16)
#define HCLGE_TM_SHAP_BS_B_LSH	16
#define HCLGE_TM_SHAP_BS_S_MSK	GENMASK(25, 21)
#define HCLGE_TM_SHAP_BS_S_LSH	21

enum hclge_shap_bucket {
	HCLGE_TM_SHAP_C_BUCKET = 0,
	HCLGE_TM_SHAP_P_BUCKET,
};

/* Setting bit HCLGE_TM_RATE_VLD in 'flag' to 1 means the 'rate' field is
 * used to configure shaping.
 */
#define HCLGE_TM_RATE_VLD	0
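
/*
 * Illustrative sketch, not part of the original header: a caller configuring
 * rate-based shaping would typically mark the rate as valid in 'flag' and
 * fill in the rate value, along the lines of
 *
 *	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
 *	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
 *
 * where shap_cfg_cmd is a hypothetical pointer to one of the shaping command
 * structures below.
 */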

struct hclge_pri_shapping_cmd {
	u8 pri_id;
	u8 rsvd[3];
	__le32 pri_shapping_para;
	u8 flag;
	u8 rsvd1[3];
	__le32 pri_rate;
};

struct hclge_pg_shapping_cmd {
	u8 pg_id;
	u8 rsvd[3];
	__le32 pg_shapping_para;
	u8 flag;
	u8 rsvd1[3];
	__le32 pg_rate;
};

struct hclge_qs_shapping_cmd {
	__le16 qs_id;
	u8 rsvd[2];
	__le32 qs_shapping_para;
	u8 flag;
	u8 rsvd1[3];
	__le32 qs_rate;
};

#define HCLGE_BP_GRP_NUM	32
#define HCLGE_BP_SUB_GRP_ID_S	0
#define HCLGE_BP_SUB_GRP_ID_M	GENMASK(4, 0)
#define HCLGE_BP_GRP_ID_S	5
#define HCLGE_BP_GRP_ID_M	GENMASK(9, 5)

#define HCLGE_BP_EXT_GRP_NUM	40
#define HCLGE_BP_EXT_GRP_ID_S	5
#define HCLGE_BP_EXT_GRP_ID_M	GENMASK(10, 5)

struct hclge_bp_to_qs_map_cmd {
	u8 tc_id;
	u8 rsvd[2];
	u8 qs_group_id;
	__le32 qs_bit_map;
	u32 rsvd1;
};
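
/*
 * Note added for illustration, not part of the original header: each back
 * pressure group appears to cover 32 qsets, since qs_bit_map is 32 bits
 * wide. Based on the masks above, for a given qset index the group id would
 * come from bits 9:5 (bits 10:5 with the extended group range) and the bit
 * position inside qs_bit_map from bits 4:0, roughly:
 *
 *	u32 qs_bitmap = 0;
 *	u16 grp_id, sub_grp_id;
 *
 *	grp_id = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, HCLGE_BP_GRP_ID_S);
 *	sub_grp_id = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
 *				     HCLGE_BP_SUB_GRP_ID_S);
 *	qs_bitmap |= BIT(sub_grp_id);
 *	bp_to_qs_map_cmd->qs_group_id = grp_id;
 *	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(qs_bitmap);
 *
 * where qs_id and bp_to_qs_map_cmd are hypothetical locals for this sketch.
 */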

#define HCLGE_PFC_DISABLE	0
#define HCLGE_PFC_TX_RX_DISABLE	0

struct hclge_pfc_en_cmd {
	u8 tx_rx_en_bitmap;
	u8 pri_en_bitmap;
};

struct hclge_cfg_pause_param_cmd {
	u8 mac_addr[ETH_ALEN];
	u8 pause_trans_gap;
	u8 rsvd;
	__le16 pause_trans_time;
	u8 rsvd1[6];
	/* extra mac address to do double check for pause frame */
	u8 mac_addr_extra[ETH_ALEN];
	u16 rsvd2;
};

struct hclge_pfc_stats_cmd {
	__le64 pkt_num[3];
};

struct hclge_port_shapping_cmd {
	__le32 port_shapping_para;
	u8 flag;
	u8 rsvd[3];
	__le32 port_rate;
};

struct hclge_shaper_ir_para {
	u8 ir_b; /* IR_B parameter of IR shaper */
	u8 ir_u; /* IR_U parameter of IR shaper */
	u8 ir_s; /* IR_S parameter of IR shaper */
};

struct hclge_tm_nodes_cmd {
	u8 pg_base_id;
	u8 pri_base_id;
	__le16 qset_base_id;
	__le16 queue_base_id;
	u8 pg_num;
	u8 pri_num;
	__le16 qset_num;
	__le16 queue_num;
};

struct hclge_tm_shaper_para {
	u32 rate;
	u8 ir_b;
	u8 ir_u;
	u8 ir_s;
	u8 bs_b;
	u8 bs_s;
	u8 flag;
};

#define hclge_tm_set_field(dest, string, val) \
	hnae3_set_field((dest), \
			(HCLGE_TM_SHAP_##string##_MSK), \
			(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
	hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
			HCLGE_TM_SHAP_##string##_LSH)
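
/*
 * Illustrative sketch, not part of the original header: the helpers above
 * compose/extract the IR and bucket-size fields of a 32-bit shaping
 * parameter word, for example roughly
 *
 *	u32 shapping_para = 0;
 *
 *	hclge_tm_set_field(shapping_para, IR_B, ir_para.ir_b);
 *	hclge_tm_set_field(shapping_para, IR_U, ir_para.ir_u);
 *	hclge_tm_set_field(shapping_para, IR_S, ir_para.ir_s);
 *	hclge_tm_set_field(shapping_para, BS_B, bs_b);
 *	hclge_tm_set_field(shapping_para, BS_S, bs_s);
 *	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
 *
 * where ir_para is a struct hclge_shaper_ir_para and bs_b, bs_s and
 * shap_cfg_cmd are hypothetical locals used only for this example.
 */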

int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
			   u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld);
int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para);
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para);
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map);
int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight);
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode);
int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para);
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
void hclge_reset_tc_config(struct hclge_dev *hdev);
#endif